> provides the best dev mode experience.
diff --git a/extensions/amazon-lambda-rest/maven-archetype/src/main/resources/archetype-resources/src/main/java/GreetingVertx.java b/extensions/amazon-lambda-rest/maven-archetype/src/main/resources/archetype-resources/src/main/java/GreetingVertx.java
index f9aad01092494..aad0fac75be16 100644
--- a/extensions/amazon-lambda-rest/maven-archetype/src/main/resources/archetype-resources/src/main/java/GreetingVertx.java
+++ b/extensions/amazon-lambda-rest/maven-archetype/src/main/resources/archetype-resources/src/main/java/GreetingVertx.java
@@ -1,6 +1,6 @@
package ${package};
-import static io.quarkus.vertx.web.Route.HttpMethod.*;
+import static io.quarkus.vertx.web.Route.HttpMethod.GET;
import io.quarkus.vertx.web.Route;
import io.vertx.ext.web.RoutingContext;
diff --git a/extensions/azure-functions-http/runtime/src/main/java/io/quarkus/azure/functions/resteasy/runtime/Function.java b/extensions/azure-functions-http/runtime/src/main/java/io/quarkus/azure/functions/resteasy/runtime/Function.java
index e34cd8f0099d1..f3f3821443e8e 100644
--- a/extensions/azure-functions-http/runtime/src/main/java/io/quarkus/azure/functions/resteasy/runtime/Function.java
+++ b/extensions/azure-functions-http/runtime/src/main/java/io/quarkus/azure/functions/resteasy/runtime/Function.java
@@ -1,6 +1,14 @@
package io.quarkus.azure.functions.resteasy.runtime;
-import static com.microsoft.azure.functions.HttpMethod.*;
+import static com.microsoft.azure.functions.HttpMethod.CONNECT;
+import static com.microsoft.azure.functions.HttpMethod.DELETE;
+import static com.microsoft.azure.functions.HttpMethod.GET;
+import static com.microsoft.azure.functions.HttpMethod.HEAD;
+import static com.microsoft.azure.functions.HttpMethod.OPTIONS;
+import static com.microsoft.azure.functions.HttpMethod.PATCH;
+import static com.microsoft.azure.functions.HttpMethod.POST;
+import static com.microsoft.azure.functions.HttpMethod.PUT;
+import static com.microsoft.azure.functions.HttpMethod.TRACE;
import java.util.Optional;
diff --git a/extensions/cyclonedx/generator/src/main/java/io/quarkus/cyclonedx/generator/CycloneDxSbomGenerator.java b/extensions/cyclonedx/generator/src/main/java/io/quarkus/cyclonedx/generator/CycloneDxSbomGenerator.java
index eb3be5e9bd786..533b87d86b54b 100644
--- a/extensions/cyclonedx/generator/src/main/java/io/quarkus/cyclonedx/generator/CycloneDxSbomGenerator.java
+++ b/extensions/cyclonedx/generator/src/main/java/io/quarkus/cyclonedx/generator/CycloneDxSbomGenerator.java
@@ -8,7 +8,14 @@
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.TreeMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.model.MailingList;
diff --git a/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesConfig.java b/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesConfig.java
index e97eef86dad8d..e1adb8178adb7 100644
--- a/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesConfig.java
+++ b/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesConfig.java
@@ -25,9 +25,7 @@ public interface OidcDevServicesConfig {
/**
* A map of roles for OIDC identity provider users.
*
- * If empty, default roles are assigned: `alice` receives `admin` and `user` roles, while other users receive
- * `user` role.
- * This map is used for role creation when no realm file is found at the `realm-path`.
+ * If empty, default roles are assigned: user `alice` receives `admin` and `user` roles and user `bob` receives role `user`.
*/
@ConfigDocMapKey("role-name")
Map<String, List<String>> roles();
diff --git a/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesProcessor.java b/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesProcessor.java
index 1729e3ad64e70..1e03ae0158540 100644
--- a/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesProcessor.java
+++ b/extensions/devservices/oidc/src/main/java/io/quarkus/devservices/oidc/OidcDevServicesProcessor.java
@@ -192,9 +192,6 @@ private static void registerRoutes(Router router) {
router.get("/logout").handler(OidcDevServicesProcessor::logout);
router.get("/userinfo").handler(OidcDevServicesProcessor::userInfo);
- // can be used for testing of bearer token authentication
- router.get("/testing/generate/access-token").handler(OidcDevServicesProcessor::generateAccessToken);
-
KeyPairGenerator kpg;
try {
kpg = KeyPairGenerator.getInstance("RSA");
@@ -206,22 +203,6 @@ private static void registerRoutes(Router router) {
kid = createKeyId();
}
- private static void generateAccessToken(RoutingContext rc) {
- String user = rc.request().getParam("user");
- if (user == null || user.isEmpty()) {
- rc.response().setStatusCode(400).endAndForget("Missing required parameter: user");
- return;
- }
- String rolesParam = rc.request().getParam("roles");
- Set<String> roles = new HashSet<>();
- if (rolesParam == null || rolesParam.isEmpty()) {
- roles.addAll(getUserRoles(user));
- } else {
- roles.addAll(Arrays.asList(rolesParam.split(",")));
- }
- rc.response().endAndForget(createAccessToken(user, roles, Set.of("openid", "email")));
- }
-
private static List<String> getUsers() {
if (userToDefaultRoles.isEmpty()) {
return Arrays.asList("alice", "bob");
diff --git a/extensions/funqy/funqy-server-common/deployment/src/main/java/io/quarkus/funqy/deployment/FunctionScannerBuildStep.java b/extensions/funqy/funqy-server-common/deployment/src/main/java/io/quarkus/funqy/deployment/FunctionScannerBuildStep.java
index 0064bf54ed611..15429b2e95a0d 100644
--- a/extensions/funqy/funqy-server-common/deployment/src/main/java/io/quarkus/funqy/deployment/FunctionScannerBuildStep.java
+++ b/extensions/funqy/funqy-server-common/deployment/src/main/java/io/quarkus/funqy/deployment/FunctionScannerBuildStep.java
@@ -1,7 +1,9 @@
package io.quarkus.funqy.deployment;
import static io.quarkus.deployment.annotations.ExecutionTime.STATIC_INIT;
-import static io.quarkus.funqy.deployment.ReflectionRegistrationUtil.*;
+import static io.quarkus.funqy.deployment.ReflectionRegistrationUtil.IGNORE_FIELD_FOR_REFLECTION_PREDICATE;
+import static io.quarkus.funqy.deployment.ReflectionRegistrationUtil.IGNORE_METHOD_FOR_REFLECTION_PREDICATE;
+import static io.quarkus.funqy.deployment.ReflectionRegistrationUtil.IGNORE_TYPE_FOR_REFLECTION_PREDICATE;
import java.lang.reflect.Modifier;
import java.util.Collection;
diff --git a/extensions/kafka-client/runtime/src/main/java/io/quarkus/kafka/client/runtime/devui/util/ConsumerFactory.java b/extensions/kafka-client/runtime/src/main/java/io/quarkus/kafka/client/runtime/devui/util/ConsumerFactory.java
index 34a577ead19a9..21132e6a16880 100644
--- a/extensions/kafka-client/runtime/src/main/java/io/quarkus/kafka/client/runtime/devui/util/ConsumerFactory.java
+++ b/extensions/kafka-client/runtime/src/main/java/io/quarkus/kafka/client/runtime/devui/util/ConsumerFactory.java
@@ -1,6 +1,10 @@
package io.quarkus.kafka.client.runtime.devui.util;
-import java.util.*;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
diff --git a/extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/EnvVarValidator.java b/extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/EnvVarValidator.java
index 85c62e5aa0f7c..f5e6e674edce0 100644
--- a/extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/EnvVarValidator.java
+++ b/extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/EnvVarValidator.java
@@ -1,6 +1,12 @@
package io.quarkus.kubernetes.deployment;
-import java.util.*;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import java.util.stream.Collectors;
import org.jboss.logging.Logger;
diff --git a/extensions/netty/deployment/src/main/java/io/quarkus/netty/deployment/NettyProcessor.java b/extensions/netty/deployment/src/main/java/io/quarkus/netty/deployment/NettyProcessor.java
index c5469651ce15f..98a0d370ffcaf 100644
--- a/extensions/netty/deployment/src/main/java/io/quarkus/netty/deployment/NettyProcessor.java
+++ b/extensions/netty/deployment/src/main/java/io/quarkus/netty/deployment/NettyProcessor.java
@@ -79,6 +79,11 @@ public SystemPropertyBuildItem setNettyMachineId() {
return new SystemPropertyBuildItem("io.netty.machineId", nettyMachineId);
}
+ @BuildStep
+ public SystemPropertyBuildItem disableFinalizers() {
+ return new SystemPropertyBuildItem("io.netty.allocator.disableCacheFinalizersForFastThreadLocalThreads", "true");
+ }
+
@BuildStep
NativeImageConfigBuildItem build(
NettyBuildTimeConfig config,
diff --git a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/AbstractDevUIProcessor.java b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/AbstractDevUIProcessor.java
index e7342b6f7727c..383dd938ddfc3 100644
--- a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/AbstractDevUIProcessor.java
+++ b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/AbstractDevUIProcessor.java
@@ -46,7 +46,7 @@ protected static CardPageBuildItem createProviderWebComponent(OidcDevUiRecorder
Map<String, String> keycloakUsers,
List<String> keycloakRealms,
boolean alwaysLogoutUserInDevUiOnReload,
- HttpConfiguration httpConfiguration) {
+ HttpConfiguration httpConfiguration, boolean discoverMetadata, String authServerUrl) {
final CardPageBuildItem cardPage = new CardPageBuildItem();
// prepare provider component
@@ -82,7 +82,8 @@ protected static CardPageBuildItem createProviderWebComponent(OidcDevUiRecorder
authorizationUrl, tokenUrl, logoutUrl, webClientTimeout, grantOptions,
keycloakUsers, oidcProviderName, oidcApplicationType, oidcGrantType,
introspectionIsAvailable, keycloakAdminUrl, keycloakRealms, swaggerIsAvailable,
- graphqlIsAvailable, swaggerUiPath, graphqlUiPath, alwaysLogoutUserInDevUiOnReload);
+ graphqlIsAvailable, swaggerUiPath, graphqlUiPath, alwaysLogoutUserInDevUiOnReload, discoverMetadata,
+ authServerUrl);
recorder.createJsonRPCService(beanContainer.getValue(), runtimeProperties, httpConfiguration);
diff --git a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/OidcDevUIProcessor.java b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/OidcDevUIProcessor.java
index 7223bdc5e018f..687ef5e9b9859 100644
--- a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/OidcDevUIProcessor.java
+++ b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/OidcDevUIProcessor.java
@@ -4,7 +4,6 @@
import java.util.Set;
import org.eclipse.microprofile.config.ConfigProvider;
-import org.jboss.logging.Logger;
import io.quarkus.arc.deployment.BeanContainerBuildItem;
import io.quarkus.deployment.Capabilities;
@@ -15,32 +14,22 @@
import io.quarkus.deployment.annotations.ExecutionTime;
import io.quarkus.deployment.annotations.Record;
import io.quarkus.deployment.builditem.ConfigurationBuildItem;
-import io.quarkus.deployment.builditem.CuratedApplicationShutdownBuildItem;
import io.quarkus.deployment.builditem.RuntimeConfigSetupCompleteBuildItem;
import io.quarkus.devservices.oidc.OidcDevServicesConfigBuildItem;
import io.quarkus.devui.spi.JsonRPCProvidersBuildItem;
import io.quarkus.devui.spi.page.CardPageBuildItem;
import io.quarkus.oidc.OidcTenantConfig;
import io.quarkus.oidc.OidcTenantConfig.Provider;
-import io.quarkus.oidc.common.runtime.OidcConstants;
import io.quarkus.oidc.deployment.OidcBuildTimeConfig;
import io.quarkus.oidc.runtime.devui.OidcDevJsonRpcService;
-import io.quarkus.oidc.runtime.devui.OidcDevServicesUtils;
import io.quarkus.oidc.runtime.devui.OidcDevUiRecorder;
import io.quarkus.oidc.runtime.providers.KnownOidcProviders;
import io.quarkus.runtime.configuration.ConfigUtils;
+import io.quarkus.vertx.core.deployment.CoreVertxBuildItem;
import io.quarkus.vertx.http.deployment.NonApplicationRootPathBuildItem;
import io.quarkus.vertx.http.runtime.HttpConfiguration;
-import io.vertx.core.Vertx;
-import io.vertx.core.http.HttpHeaders;
-import io.vertx.core.json.JsonObject;
-import io.vertx.mutiny.core.buffer.Buffer;
-import io.vertx.mutiny.ext.web.client.HttpResponse;
-import io.vertx.mutiny.ext.web.client.WebClient;
public class OidcDevUIProcessor extends AbstractDevUIProcessor {
- static volatile Vertx vertxInstance;
- private static final Logger LOG = Logger.getLogger(OidcDevUIProcessor.class);
private static final String TENANT_ENABLED_CONFIG_KEY = CONFIG_PREFIX + "tenant-enabled";
private static final String DISCOVERY_ENABLED_CONFIG_KEY = CONFIG_PREFIX + "discovery-enabled";
@@ -57,9 +46,9 @@ public class OidcDevUIProcessor extends AbstractDevUIProcessor {
@Record(ExecutionTime.RUNTIME_INIT)
@BuildStep(onlyIf = IsDevelopment.class)
+ @Consume(CoreVertxBuildItem.class) // metadata discovery requires Vertx instance
@Consume(RuntimeConfigSetupCompleteBuildItem.class)
- void prepareOidcDevConsole(CuratedApplicationShutdownBuildItem closeBuildItem,
- Capabilities capabilities,
+ void prepareOidcDevConsole(Capabilities capabilities,
HttpConfiguration httpConfiguration,
BeanContainerBuildItem beanContainer,
NonApplicationRootPathBuildItem nonApplicationRootPathBuildItem,
@@ -76,33 +65,8 @@ void prepareOidcDevConsole(CuratedApplicationShutdownBuildItem closeBuildItem,
? oidcDevServicesConfigBuildItem.get().getConfig().get(AUTH_SERVER_URL_CONFIG_KEY)
: getAuthServerUrl(providerConfig);
if (authServerUrl != null) {
- if (vertxInstance == null) {
- vertxInstance = Vertx.vertx();
-
- Runnable closeTask = new Runnable() {
- @Override
- public void run() {
- if (vertxInstance != null) {
- try {
- vertxInstance.close();
- } catch (Throwable t) {
- LOG.error("Failed to close Vertx instance", t);
- }
- }
- vertxInstance = null;
- }
- };
- closeBuildItem.addCloseTask(closeTask, true);
- }
- JsonObject metadata = null;
- if (isDiscoveryEnabled(providerConfig)) {
- metadata = discoverMetadata(authServerUrl);
- if (metadata == null) {
- return;
- }
- }
+ boolean discoverMetadata = isDiscoveryEnabled(providerConfig);
String providerName = tryToGetProviderName(authServerUrl);
- boolean metadataNotNull = metadata != null;
final String keycloakAdminUrl;
if (KEYCLOAK.equals(providerName)) {
@@ -116,12 +80,10 @@ public void run() {
getApplicationType(providerConfig),
oidcConfig.devui().grant().type().isPresent() ? oidcConfig.devui().grant().type().get().getGrantType()
: "code",
- metadataNotNull ? metadata.getString("authorization_endpoint") : null,
- metadataNotNull ? metadata.getString("token_endpoint") : null,
- metadataNotNull ? metadata.getString("end_session_endpoint") : null,
- metadataNotNull
- ? (metadata.containsKey("introspection_endpoint") || metadata.containsKey("userinfo_endpoint"))
- : checkProviderUserInfoRequired(providerConfig),
+ null,
+ null,
+ null,
+ checkProviderUserInfoRequired(providerConfig),
beanContainer,
oidcConfig.devui().webClientTimeout(),
oidcConfig.devui().grantOptions(),
@@ -131,7 +93,7 @@ public void run() {
null,
null,
true,
- httpConfiguration);
+ httpConfiguration, discoverMetadata, authServerUrl);
cardPageProducer.produce(cardPage);
}
}
@@ -141,14 +103,14 @@ JsonRPCProvidersBuildItem produceOidcDevJsonRpcService() {
return new JsonRPCProvidersBuildItem(OidcDevJsonRpcService.class);
}
- private boolean checkProviderUserInfoRequired(OidcTenantConfig providerConfig) {
+ private static boolean checkProviderUserInfoRequired(OidcTenantConfig providerConfig) {
if (providerConfig != null) {
- return providerConfig.authentication.userInfoRequired.orElse(false);
+ return providerConfig.authentication().userInfoRequired().orElse(false);
}
return false;
}
- private String tryToGetProviderName(String authServerUrl) {
+ private static String tryToGetProviderName(String authServerUrl) {
if (authServerUrl.contains("/realms/")) {
return KEYCLOAK;
}
@@ -163,28 +125,6 @@ private String tryToGetProviderName(String authServerUrl) {
return null;
}
- private JsonObject discoverMetadata(String authServerUrl) {
- WebClient client = OidcDevServicesUtils.createWebClient(vertxInstance);
- try {
- String metadataUrl = authServerUrl + OidcConstants.WELL_KNOWN_CONFIGURATION;
- LOG.infof("OIDC Dev Console: discovering the provider metadata at %s", metadataUrl);
-
- HttpResponse<Buffer> resp = client.getAbs(metadataUrl)
- .putHeader(HttpHeaders.ACCEPT.toString(), "application/json").send().await().indefinitely();
- if (resp.statusCode() == 200) {
- return resp.bodyAsJsonObject();
- } else {
- LOG.errorf("OIDC metadata discovery failed: %s", resp.bodyAsString());
- return null;
- }
- } catch (Throwable t) {
- LOG.infof("OIDC metadata can not be discovered: %s", t.toString());
- return null;
- } finally {
- client.close();
- }
- }
-
private static String getConfigProperty(String name) {
return ConfigProvider.getConfig().getValue(name, String.class);
}
@@ -195,7 +135,7 @@ private static boolean isOidcTenantEnabled() {
private static boolean isDiscoveryEnabled(OidcTenantConfig providerConfig) {
return ConfigProvider.getConfig().getOptionalValue(DISCOVERY_ENABLED_CONFIG_KEY, Boolean.class)
- .orElse((providerConfig != null ? providerConfig.discoveryEnabled.orElse(true) : true));
+ .orElse((providerConfig != null ? providerConfig.discoveryEnabled().orElse(true) : true));
}
private static boolean getBooleanProperty(String name) {
@@ -210,7 +150,7 @@ private static String getAuthServerUrl(OidcTenantConfig providerConfig) {
try {
return getConfigProperty(AUTH_SERVER_URL_CONFIG_KEY);
} catch (Exception ex) {
- return providerConfig != null ? providerConfig.authServerUrl.get() : null;
+ return providerConfig != null ? providerConfig.authServerUrl().get() : null;
}
}
diff --git a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/keycloak/KeycloakDevUIProcessor.java b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/keycloak/KeycloakDevUIProcessor.java
index c8468c7f002e0..71072fadc006d 100644
--- a/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/keycloak/KeycloakDevUIProcessor.java
+++ b/extensions/oidc/deployment/src/main/java/io/quarkus/oidc/deployment/devservices/keycloak/KeycloakDevUIProcessor.java
@@ -72,7 +72,7 @@ void produceProviderComponent(Optional confi
users,
keycloakRealms,
configProps.get().isContainerRestarted(),
- httpConfiguration);
+ httpConfiguration, false, null);
// use same card page so that both pages appear on the same card
var keycloakAdminPageItem = new KeycloakAdminPageBuildItem(cardPageBuildItem);
keycloakAdminPageProducer.produce(keycloakAdminPageItem);
diff --git a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcProviderClientImpl.java b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcProviderClientImpl.java
index 56d53f00498ea..123853e154ef6 100644
--- a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcProviderClientImpl.java
+++ b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcProviderClientImpl.java
@@ -466,7 +466,7 @@ Vertx getVertx() {
return vertx;
}
- WebClient getWebClient() {
+ public WebClient getWebClient() {
return client;
}
diff --git a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevJsonRpcService.java b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevJsonRpcService.java
index 2944e03ffa7ae..742677989272b 100644
--- a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevJsonRpcService.java
+++ b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevJsonRpcService.java
@@ -2,8 +2,6 @@
import static io.quarkus.oidc.runtime.devui.OidcDevServicesUtils.getTokens;
-import jakarta.annotation.PostConstruct;
-import jakarta.annotation.PreDestroy;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.ConfigProvider;
@@ -21,17 +19,8 @@ public class OidcDevJsonRpcService {
@Inject
OidcDevLoginObserver oidcDevTokensObserver;
- private Vertx vertx;
-
- @PostConstruct
- public void startup() {
- vertx = Vertx.vertx();
- }
-
- @PreDestroy
- public void shutdown() {
- vertx.close();
- }
+ @Inject
+ Vertx vertx;
@NonBlocking
public OidcDevUiRuntimePropertiesDTO getProperties() {
@@ -72,7 +61,7 @@ public Multi streamOidcLoginEvent() {
return oidcDevTokensObserver.streamOidcLoginEvent();
}
- public void hydrate(OidcDevUiRpcSvcPropertiesBean properties, HttpConfiguration httpConfiguration) {
+ void hydrate(OidcDevUiRpcSvcPropertiesBean properties, HttpConfiguration httpConfiguration) {
this.props = properties;
this.httpConfiguration = httpConfiguration;
}
diff --git a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevUiRecorder.java b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevUiRecorder.java
index f1b220e4d1610..7cefaff7013f0 100644
--- a/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevUiRecorder.java
+++ b/extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/devui/OidcDevUiRecorder.java
@@ -4,17 +4,28 @@
import java.util.List;
import java.util.Map;
+import org.jboss.logging.Logger;
+
import io.quarkus.arc.runtime.BeanContainer;
+import io.quarkus.oidc.common.runtime.OidcConstants;
import io.quarkus.oidc.runtime.OidcConfig;
import io.quarkus.runtime.RuntimeValue;
import io.quarkus.runtime.annotations.Recorder;
+import io.quarkus.vertx.core.runtime.VertxCoreRecorder;
import io.quarkus.vertx.http.runtime.HttpConfiguration;
import io.vertx.core.Handler;
+import io.vertx.core.http.HttpHeaders;
+import io.vertx.core.json.JsonObject;
import io.vertx.ext.web.RoutingContext;
+import io.vertx.mutiny.core.buffer.Buffer;
+import io.vertx.mutiny.ext.web.client.HttpResponse;
+import io.vertx.mutiny.ext.web.client.WebClient;
@Recorder
public class OidcDevUiRecorder {
+ private static final Logger LOG = Logger.getLogger(OidcDevUiRecorder.class);
+
private final RuntimeValue<OidcConfig> oidcConfigRuntimeValue;
public OidcDevUiRecorder(RuntimeValue<OidcConfig> oidcConfigRuntimeValue) {
@@ -31,8 +42,18 @@ public RuntimeValue getRpcServiceProperties(Strin
String logoutUrl, Duration webClientTimeout, Map<String, Map<String, String>> grantOptions,
Map<String, String> oidcUsers, String oidcProviderName, String oidcApplicationType, String oidcGrantType,
boolean introspectionIsAvailable, String keycloakAdminUrl, List<String> keycloakRealms, boolean swaggerIsAvailable,
- boolean graphqlIsAvailable, String swaggerUiPath, String graphqlUiPath, boolean alwaysLogoutUserInDevUiOnReload) {
-
+ boolean graphqlIsAvailable, String swaggerUiPath, String graphqlUiPath, boolean alwaysLogoutUserInDevUiOnReload,
+ boolean discoverMetadata, String authServerUrl) {
+ if (discoverMetadata) {
+ JsonObject metadata = discoverMetadata(authServerUrl);
+ if (metadata != null) {
+ authorizationUrl = metadata.getString("authorization_endpoint");
+ tokenUrl = metadata.getString("token_endpoint");
+ logoutUrl = metadata.getString("end_session_endpoint");
+ introspectionIsAvailable = metadata.containsKey("introspection_endpoint")
+ || metadata.containsKey("userinfo_endpoint");
+ }
+ }
return new RuntimeValue<OidcDevUiRpcSvcPropertiesBean>(
new OidcDevUiRpcSvcPropertiesBean(authorizationUrl, tokenUrl, logoutUrl,
webClientTimeout, grantOptions, oidcUsers, oidcProviderName, oidcApplicationType, oidcGrantType,
@@ -48,4 +69,25 @@ public Handler<RoutingContext> logoutHandler() {
return new OidcDevSessionLogoutHandler();
}
+ private static JsonObject discoverMetadata(String authServerUrl) {
+ WebClient client = OidcDevServicesUtils.createWebClient(VertxCoreRecorder.getVertx().get());
+ try {
+ String metadataUrl = authServerUrl + OidcConstants.WELL_KNOWN_CONFIGURATION;
+ LOG.infof("OIDC Dev Console: discovering the provider metadata at %s", metadataUrl);
+
+ HttpResponse<Buffer> resp = client.getAbs(metadataUrl)
+ .putHeader(HttpHeaders.ACCEPT.toString(), "application/json").send().await().indefinitely();
+ if (resp.statusCode() == 200) {
+ return resp.bodyAsJsonObject();
+ } else {
+ LOG.errorf("OIDC metadata discovery failed: %s", resp.bodyAsString());
+ return null;
+ }
+ } catch (Throwable t) {
+ LOG.infof("OIDC metadata can not be discovered: %s", t.toString());
+ return null;
+ } finally {
+ client.close();
+ }
+ }
}
diff --git a/extensions/opentelemetry/deployment/pom.xml b/extensions/opentelemetry/deployment/pom.xml
index cac0090542ced..f0b9e0ef6d2a4 100644
--- a/extensions/opentelemetry/deployment/pom.xml
+++ b/extensions/opentelemetry/deployment/pom.xml
@@ -90,6 +90,11 @@
<artifactId>vertx-web-client</artifactId>
<scope>test</scope>
</dependency>
+<dependency>
+ <groupId>io.smallrye.reactive</groupId>
+ <artifactId>smallrye-mutiny-vertx-junit5</artifactId>
+ <scope>test</scope>
+</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-reactive-routes-deployment</artifactId>
diff --git a/extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/traces/MutinyTracingHelperTest.java b/extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/traces/MutinyTracingHelperTest.java
new file mode 100644
index 0000000000000..58313b1442200
--- /dev/null
+++ b/extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/traces/MutinyTracingHelperTest.java
@@ -0,0 +1,313 @@
+package io.quarkus.opentelemetry.deployment.traces;
+
+import static io.quarkus.opentelemetry.runtime.tracing.mutiny.MutinyTracingHelper.wrapWithSpan;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Stream;
+
+import jakarta.inject.Inject;
+
+import org.jboss.shrinkwrap.api.ShrinkWrap;
+import org.jboss.shrinkwrap.api.asset.StringAsset;
+import org.jboss.shrinkwrap.api.spec.JavaArchive;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.extension.RegisterExtension;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.context.Scope;
+import io.opentelemetry.sdk.trace.data.SpanData;
+import io.quarkus.opentelemetry.deployment.common.exporter.TestSpanExporter;
+import io.quarkus.opentelemetry.deployment.common.exporter.TestSpanExporterProvider;
+import io.quarkus.opentelemetry.runtime.QuarkusContextStorage;
+import io.quarkus.test.QuarkusUnitTest;
+import io.smallrye.common.vertx.VertxContext;
+import io.smallrye.mutiny.Multi;
+import io.smallrye.mutiny.Uni;
+import io.smallrye.mutiny.helpers.test.AssertSubscriber;
+import io.smallrye.mutiny.helpers.test.UniAssertSubscriber;
+import io.vertx.core.Context;
+import io.vertx.core.Vertx;
+
+class MutinyTracingHelperTest {
+
+ @RegisterExtension
+ static final QuarkusUnitTest TEST = new QuarkusUnitTest()
+ .setArchiveProducer(
+ () -> ShrinkWrap.create(JavaArchive.class)
+ .addClasses(TestSpanExporter.class, TestSpanExporterProvider.class)
+ .addAsResource(new StringAsset(TestSpanExporterProvider.class.getCanonicalName()),
+ "META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider"));
+
+ @Inject
+ private TestSpanExporter spanExporter;
+
+ @Inject
+ private Tracer tracer;
+
+ @Inject
+ private Vertx vertx;
+
+ @AfterEach
+ void tearDown() {
+ spanExporter.reset();
+ }
+
+ @ParameterizedTest(name = "{index}: Simple uni pipeline {1}")
+ @MethodSource("generateContextRunners")
+ void testSimpleUniPipeline(final String contextType, final String contextName) {
+
+ final UniAssertSubscriber<String> subscriber = Uni.createFrom()
+ .item("Hello")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUni(m -> wrapWithSpan(tracer, "testSpan",
+ Uni.createFrom().item(m).onItem().transform(s -> {
+ final Span span = tracer.spanBuilder("subspan").startSpan();
+ try (final Scope scope = span.makeCurrent()) {
+ return s + " world";
+ } finally {
+ span.end();
+ }
+ })))
+ .subscribe()
+ .withSubscriber(new UniAssertSubscriber<>());
+
+ subscriber.awaitItem().assertItem("Hello world");
+
+ //ensure there are two spans with subspan as child of testSpan
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
+ assertThat(spans.stream().map(SpanData::getName)).containsExactlyInAnyOrder("testSpan", "subspan");
+ assertChildSpan(spans, "testSpan", "subspan");
+ }
+
+ @ParameterizedTest(name = "{index}: Explicit parent {1}")
+ @MethodSource("generateContextRunners")
+ void testSpanWithExplicitParent(final String contextType, final String contextName) {
+
+ final String parentSpanName = "parentSpan";
+ final String pipelineSpanName = "pipelineSpan";
+ final String subspanName = "subspan";
+
+ final Span parentSpan = tracer.spanBuilder(parentSpanName).startSpan();
+ final io.opentelemetry.context.Context parentContext = io.opentelemetry.context.Context.current().with(parentSpan);
+
+ final UniAssertSubscriber<String> subscriber = Uni.createFrom()
+ .item("Hello")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUni(m -> wrapWithSpan(tracer, Optional.of(parentContext),
+ pipelineSpanName,
+ Uni.createFrom().item(m).onItem().transform(s -> {
+ final Span span = tracer.spanBuilder(subspanName).startSpan();
+ try (final Scope scope = span.makeCurrent()) {
+ return s + " world";
+ } finally {
+ span.end();
+ }
+ })))
+ .subscribe()
+ .withSubscriber(new UniAssertSubscriber<>());
+
+ subscriber.awaitItem().assertItem("Hello world");
+ parentSpan.end();
+
+ //ensure there are 3 spans with proper parent-child relationships
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(3);
+ assertThat(spans.stream().map(SpanData::getName)).containsExactlyInAnyOrder(parentSpanName, pipelineSpanName,
+ subspanName);
+ assertChildSpan(spans, parentSpanName, pipelineSpanName);
+ assertChildSpan(spans, pipelineSpanName, subspanName);
+ }
+
+ @ParameterizedTest(name = "{index}: Nested uni pipeline with implicit parent {1}")
+ @MethodSource("generateContextRunners")
+ void testNestedPipeline_implicitParent(final String contextType,
+ final String contextName) {
+
+ final String parentSpanName = "parentSpan";
+ final String childSpanName = "childSpan";
+
+ final UniAssertSubscriber<String> subscriber = Uni.createFrom()
+ .item("test")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUni(m -> wrapWithSpan(tracer, parentSpanName,
+ Uni.createFrom().item(m)
+ .onItem().transform(s -> s + " in outer span")
+ .onItem().transformToUni(m1 -> wrapWithSpan(tracer, childSpanName,
+ Uni.createFrom().item(m1)
+ .onItem().transform(s -> "now in inner span")))
+
+ ))
+ .subscribe()
+ .withSubscriber(new UniAssertSubscriber<>());
+
+ subscriber.awaitItem();
+
+ //ensure there are 2 spans with doSomething and doSomethingAsync as children of testSpan
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
+ assertThat(spans.stream().map(SpanData::getName)).containsExactlyInAnyOrder(parentSpanName, childSpanName);
+ assertChildSpan(spans, parentSpanName, childSpanName);
+ }
+
+ @ParameterizedTest(name = "{index}: Nested uni pipeline with explicit no parent {1}")
+ @MethodSource("generateContextRunners")
+ void testNestedPipeline_explicitNoParent(final String contextType, final String contextName) {
+
+ final String parentSpanName = "parentSpan";
+ final String childSpanName = "childSpan";
+
+ final UniAssertSubscriber<String> subscriber = Uni.createFrom()
+ .item("test")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUni(m -> wrapWithSpan(tracer, parentSpanName,
+ Uni.createFrom().item(m)
+ .onItem().transform(s -> s + " in outer span")
+ .onItem().transformToUni(m1 -> wrapWithSpan(tracer, Optional.empty(), childSpanName,
+ Uni.createFrom().item(m1)
+ .onItem().transform(s -> "now in inner span")))
+
+ ))
+ .subscribe()
+ .withSubscriber(new UniAssertSubscriber<>());
+
+ subscriber.awaitItem();
+
+ //ensure there are 2 spans but without parent-child relationship
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
+ assertThat(spans.stream().map(SpanData::getName)).containsExactlyInAnyOrder(parentSpanName, childSpanName);
+ assertThat(spans.stream()
+ .filter(span -> span.getName().equals(childSpanName))
+ .findAny()
+ .orElseThrow()
+ .getParentSpanId()).isEqualTo("0000000000000000");//signifies no parent
+ }
+
+ @ParameterizedTest(name = "{index}: Concatenating multi pipeline {1}")
+ @MethodSource("generateContextRunners")
+ void testSimpleMultiPipeline_Concatenate(final String contextType, final String contextName) {
+
+ final AssertSubscriber<String> subscriber = Multi.createFrom()
+ .items("test1", "test2", "test3")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUniAndConcatenate(m -> wrapWithSpan(tracer, Optional.empty(), "testSpan " + m,
+ //the traced pipeline
+ Uni.createFrom().item(m).onItem().transform(s -> {
+ final Span span = tracer.spanBuilder("subspan " + s).startSpan();
+ try (final Scope scope = span.makeCurrent()) {
+ return s + " transformed";
+ } finally {
+ span.end();
+ }
+ })))
+ .subscribe()
+ .withSubscriber(AssertSubscriber.create(3));
+
+ subscriber.awaitCompletion().assertItems("test1 transformed", "test2 transformed", "test3 transformed");
+
+ //ensure there are six spans with three pairs of subspan as child of testSpan
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(6);
+ for (int i = 1; i <= 3; i++) {
+ final int currentI = i;
+ assertThat(spans.stream().anyMatch(span -> span.getName().equals("testSpan test" + currentI))).isTrue();
+ assertThat(spans.stream().anyMatch(span -> span.getName().equals("subspan test" + currentI))).isTrue();
+ assertChildSpan(spans, "testSpan test" + currentI, "subspan test" + currentI);
+ }
+ }
+
+ @ParameterizedTest(name = "{index}: Merging multi pipeline {1}")
+ @MethodSource("generateContextRunners")
+ void testSimpleMultiPipeline_Merge(final String contextType, final String contextName) {
+
+ final AssertSubscriber<String> subscriber = Multi.createFrom()
+ .items("test1", "test2", "test3")
+ .emitOn(r -> runOnContext(r, vertx, contextType))
+ .onItem()
+ .transformToUniAndMerge(m -> wrapWithSpan(tracer, Optional.empty(), "testSpan " + m,
+ Uni.createFrom().item(m).onItem().transform(s -> {
+ final Span span = tracer.spanBuilder("subspan " + s).startSpan();
+ try (final Scope scope = span.makeCurrent()) {
+ return s + " transformed";
+ } finally {
+ span.end();
+ }
+ })))
+ .subscribe()
+ .withSubscriber(AssertSubscriber.create(3));
+
+ subscriber.awaitCompletion();
+
+ //ensure there are six spans with three pairs of subspan as child of testSpan
+ final List<SpanData> spans = spanExporter.getFinishedSpanItems(6);
+ for (int i = 1; i <= 3; i++) {
+ final int currentI = i;
+ assertThat(spans.stream().anyMatch(span -> span.getName().equals("testSpan test" + currentI))).isTrue();
+ assertThat(spans.stream().anyMatch(span -> span.getName().equals("subspan test" + currentI))).isTrue();
+ assertChildSpan(spans, "testSpan test" + currentI, "subspan test" + currentI);
+ }
+ }
+
+ private static void assertChildSpan(final List<SpanData> spans, final String parentSpanName,
+ final String childSpanName1) {
+ assertThat(spans.stream()
+ .filter(span -> span.getName().equals(childSpanName1))
+ .findAny()
+ .orElseThrow()
+ .getParentSpanId()).isEqualTo(
+ spans.stream().filter(span -> span.getName().equals(parentSpanName)).findAny().get().getSpanId());
+ }
+
+ private static Stream<Arguments> generateContextRunners() {
+ return Stream.of(
+ Arguments.of("WITHOUT_CONTEXT", "Without Context"),
+ Arguments.of("ROOT_CONTEXT", "On Root Context"),
+ Arguments.of("DUPLICATED_CONTEXT", "On Duplicated Context"));
+ }
+
+ private void runOnContext(final Runnable runnable, final Vertx vertx, final String contextType) {
+ switch (contextType) {
+ case "WITHOUT_CONTEXT":
+ runWithoutContext(runnable);
+ break;
+ case "ROOT_CONTEXT":
+ runOnRootContext(runnable, vertx);
+ break;
+ case "DUPLICATED_CONTEXT":
+ runOnDuplicatedContext(runnable, vertx);
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown context type: " + contextType);
+ }
+ }
+
+ private static void runWithoutContext(final Runnable runnable) {
+ assertThat(QuarkusContextStorage.getVertxContext()).isNull();
+ runnable.run();
+ }
+
+ private static void runOnRootContext(final Runnable runnable, final Vertx vertx) {
+ final Context rootContext = VertxContext.getRootContext(vertx.getOrCreateContext());
+ assertThat(rootContext).isNotNull();
+ assertThat(VertxContext.isDuplicatedContext(rootContext)).isFalse();
+ assertThat(rootContext).isNotEqualTo(QuarkusContextStorage.getVertxContext());
+
+ rootContext.runOnContext(v -> runnable.run());
+ }
+
+ private static void runOnDuplicatedContext(final Runnable runnable, final Vertx vertx) {
+ final Context duplicatedContext = VertxContext.createNewDuplicatedContext(vertx.getOrCreateContext());
+ assertThat(duplicatedContext).isNotNull();
+ assertThat(VertxContext.isDuplicatedContext(duplicatedContext)).isTrue();
+
+ duplicatedContext.runOnContext(v -> runnable.run());
+ }
+
+}
diff --git a/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/exporter/otlp/OTelExporterRecorder.java b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/exporter/otlp/OTelExporterRecorder.java
index 57cd1f9d2253e..a4ae1b08bd535 100644
--- a/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/exporter/otlp/OTelExporterRecorder.java
+++ b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/exporter/otlp/OTelExporterRecorder.java
@@ -43,7 +43,12 @@
import io.quarkus.opentelemetry.runtime.config.build.OTelBuildConfig;
import io.quarkus.opentelemetry.runtime.config.runtime.BatchSpanProcessorConfig;
import io.quarkus.opentelemetry.runtime.config.runtime.OTelRuntimeConfig;
-import io.quarkus.opentelemetry.runtime.config.runtime.exporter.*;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.CompressionType;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.OtlpExporterConfig;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.OtlpExporterLogsConfig;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.OtlpExporterMetricsConfig;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.OtlpExporterRuntimeConfig;
+import io.quarkus.opentelemetry.runtime.config.runtime.exporter.OtlpExporterTracesConfig;
import io.quarkus.opentelemetry.runtime.exporter.otlp.logs.NoopLogRecordExporter;
import io.quarkus.opentelemetry.runtime.exporter.otlp.logs.VertxGrpcLogRecordExporter;
import io.quarkus.opentelemetry.runtime.exporter.otlp.logs.VertxHttpLogRecordExporter;
diff --git a/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/logs/OpenTelemetryLogHandler.java b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/logs/OpenTelemetryLogHandler.java
index 65e2de1270e70..24b7b9f93a78f 100644
--- a/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/logs/OpenTelemetryLogHandler.java
+++ b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/logs/OpenTelemetryLogHandler.java
@@ -1,12 +1,15 @@
package io.quarkus.opentelemetry.runtime.logs;
-import static io.opentelemetry.semconv.ExceptionAttributes.*;
+import static io.opentelemetry.semconv.ExceptionAttributes.EXCEPTION_MESSAGE;
+import static io.opentelemetry.semconv.ExceptionAttributes.EXCEPTION_STACKTRACE;
+import static io.opentelemetry.semconv.ExceptionAttributes.EXCEPTION_TYPE;
import static io.opentelemetry.semconv.incubating.CodeIncubatingAttributes.CODE_FUNCTION;
import static io.opentelemetry.semconv.incubating.CodeIncubatingAttributes.CODE_LINENO;
import static io.opentelemetry.semconv.incubating.CodeIncubatingAttributes.CODE_NAMESPACE;
import static io.opentelemetry.semconv.incubating.LogIncubatingAttributes.LOG_FILE_PATH;
-import static io.opentelemetry.semconv.incubating.ThreadIncubatingAttributes.*;
+import static io.opentelemetry.semconv.incubating.ThreadIncubatingAttributes.THREAD_ID;
import static io.quarkus.opentelemetry.runtime.config.build.OTelBuildConfig.INSTRUMENTATION_NAME;
+import static io.quarkus.vertx.http.runtime.attribute.ThreadNameAttribute.THREAD_NAME;
import java.io.PrintWriter;
import java.io.StringWriter;
diff --git a/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/mutiny/MutinyTracingHelper.java b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/mutiny/MutinyTracingHelper.java
new file mode 100644
index 0000000000000..a1197a0174312
--- /dev/null
+++ b/extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/mutiny/MutinyTracingHelper.java
@@ -0,0 +1,121 @@
+package io.quarkus.opentelemetry.runtime.tracing.mutiny;
+
+import java.util.Optional;
+import java.util.concurrent.CancellationException;
+
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.SpanBuilder;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.context.Scope;
+import io.quarkus.opentelemetry.runtime.QuarkusContextStorage;
+import io.smallrye.mutiny.Uni;
+import io.vertx.core.Context;
+
+public class MutinyTracingHelper {
+
+ /**
+ * Wraps the given pipeline with a span with the given name. Ensures that subspans find the current span as context,
+ * by running on a duplicated context. The span will be closed when the pipeline completes.
+ * If there is already a span in the current context, it will be used as parent for the new span.
+ *
+ * Use as follows:
+ * Given this existing pipeline:
+ * ```java
+ * Uni.createFrom().item("Hello")
+ * .onItem().transform(s -> s + " World")
+ * .subscribe().with(System.out::println);
+ * ```
+ * wrap like this:
+ * ```java
+ * Uni.createFrom().item("Hello")
+ * .onItem().transformToUni(s -> wrapWithSpan(tracer, "mySpan", Uni.createFrom().item(s + " World")))
+ * .subscribe().with(System.out::println);
+ * ```
+ *
+ * it also works with multi:
+ * ```java
+ * Multi.createFrom().items("Alice", "Bob", "Charlie")
+ * .onItem().transform(name -> "Hello " + name)
+ * .subscribe().with(System.out::println);
+ * ```
+ * wrap like this:
+ * ```java
+ * Multi.createFrom().items("Alice", "Bob", "Charlie")
+ * .onItem().transformToUni(s -> wrapWithSpan(tracer, "mySpan", Uni.createFrom().item("Hello " + s)
+ * .onItem().transform(name -> "Hello " + name)
+ * ))
+ * .subscribe().with(System.out::println);
+ * ```
+ *
+ * @param <T> the type of the result of the pipeline
+ * @param spanName
+ * the name of the span that should be created
+ * @param pipeline
+ * the pipeline to run within the span
+ *
+ * @return the result of the pipeline
+ */
+ public static <T> Uni<T> wrapWithSpan(final Tracer tracer, final String spanName, final Uni<T> pipeline) {
+
+ return wrapWithSpan(tracer, Optional.of(io.opentelemetry.context.Context.current()), spanName, pipeline);
+ }
+
+ /**
+ * see {@link #wrapWithSpan(Tracer, String, Uni)}
+ * use this method if you manually want to specify the parent context of the new span
+ * or if you want to ensure the new span is a root span.
+ *
+ * @param <T>
+ * @param parentContext
+ * the parent context to use for the new span. If empty, a new root span will be created.
+ * @param spanName
+ * the name of the span that should be created
+ * @param pipeline
+ * the pipeline to run within the span
+ *
+ * @return the result of the pipeline
+ */
+ public static <T> Uni<T> wrapWithSpan(final Tracer tracer,
+ final Optional<io.opentelemetry.context.Context> parentContext,
+ final String spanName, final Uni<T> pipeline) {
+
+ return runOnDuplicatedContext(Uni.createFrom().deferred(() -> {
+ final SpanBuilder spanBuilder = tracer.spanBuilder(spanName);
+ if (parentContext.isPresent()) {
+ spanBuilder.setParent(parentContext.get());
+ } else {
+ spanBuilder.setNoParent();
+ }
+ final Span span = spanBuilder.startSpan();
+ final Scope scope = span.makeCurrent();
+ return pipeline.onTermination()
+ .invoke((o, throwable, isCancelled) -> {
+ try {
+ if (Boolean.TRUE.equals(isCancelled)) {
+ span.recordException(new CancellationException());
+ } else if (throwable != null) {
+ span.recordException(throwable);
+ }
+ span.end();
+ } finally {
+ scope.close();
+ }
+ });
+ }));
+ }
+
+ private static <T> Uni<T> runOnDuplicatedContext(final Uni<T> deferred) {
+ //creates duplicate context, if the current context is not a duplicated one and not null
+ //Otherwise returns the current context or null
+ final Context context = QuarkusContextStorage.getVertxContext();
+
+ return deferred.runSubscriptionOn(runnable -> {
+ if (context != null) {
+ context.runOnContext(v -> runnable.run());
+ } else {
+ runnable.run();
+ }
+ });
+ }
+
+}
diff --git a/extensions/redis-cache/runtime/src/main/java/io/quarkus/cache/redis/runtime/RedisCacheBuildRecorder.java b/extensions/redis-cache/runtime/src/main/java/io/quarkus/cache/redis/runtime/RedisCacheBuildRecorder.java
index 1ec0696eb0e9f..b0baa97b3aa5e 100644
--- a/extensions/redis-cache/runtime/src/main/java/io/quarkus/cache/redis/runtime/RedisCacheBuildRecorder.java
+++ b/extensions/redis-cache/runtime/src/main/java/io/quarkus/cache/redis/runtime/RedisCacheBuildRecorder.java
@@ -1,7 +1,10 @@
package io.quarkus.cache.redis.runtime;
import java.lang.reflect.Type;
-import java.util.*;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
import java.util.function.Supplier;
import org.jboss.logging.Logger;
diff --git a/extensions/tls-registry/cli/src/main/java/io/quarkus/tls/cli/letsencrypt/LetsEncryptRenewCommand.java b/extensions/tls-registry/cli/src/main/java/io/quarkus/tls/cli/letsencrypt/LetsEncryptRenewCommand.java
index e34717fc6c025..87c3f4f015f45 100644
--- a/extensions/tls-registry/cli/src/main/java/io/quarkus/tls/cli/letsencrypt/LetsEncryptRenewCommand.java
+++ b/extensions/tls-registry/cli/src/main/java/io/quarkus/tls/cli/letsencrypt/LetsEncryptRenewCommand.java
@@ -4,7 +4,8 @@
import static io.quarkus.tls.cli.letsencrypt.LetsEncryptConstants.DOT_ENV_FILE;
import static io.quarkus.tls.cli.letsencrypt.LetsEncryptConstants.KEY_FILE;
import static io.quarkus.tls.cli.letsencrypt.LetsEncryptConstants.LETS_ENCRYPT_DIR;
-import static io.quarkus.tls.cli.letsencrypt.LetsEncryptHelpers.*;
+import static io.quarkus.tls.cli.letsencrypt.LetsEncryptHelpers.adjustPermissions;
+import static io.quarkus.tls.cli.letsencrypt.LetsEncryptHelpers.renewCertificate;
import static java.lang.System.Logger.Level.INFO;
import java.util.concurrent.Callable;
diff --git a/extensions/vertx-graphql/deployment/src/main/java/io/quarkus/vertx/graphql/deployment/VertxGraphqlProcessor.java b/extensions/vertx-graphql/deployment/src/main/java/io/quarkus/vertx/graphql/deployment/VertxGraphqlProcessor.java
index 8638bca0e38d7..04cadebdd85bf 100644
--- a/extensions/vertx-graphql/deployment/src/main/java/io/quarkus/vertx/graphql/deployment/VertxGraphqlProcessor.java
+++ b/extensions/vertx-graphql/deployment/src/main/java/io/quarkus/vertx/graphql/deployment/VertxGraphqlProcessor.java
@@ -12,7 +12,10 @@
import io.quarkus.deployment.annotations.Record;
import io.quarkus.deployment.builditem.FeatureBuildItem;
import io.quarkus.deployment.builditem.LaunchModeBuildItem;
-import io.quarkus.deployment.builditem.nativeimage.*;
+import io.quarkus.deployment.builditem.nativeimage.NativeImageResourceBundleBuildItem;
+import io.quarkus.deployment.builditem.nativeimage.NativeImageResourceDirectoryBuildItem;
+import io.quarkus.deployment.builditem.nativeimage.ReflectiveClassBuildItem;
+import io.quarkus.deployment.builditem.nativeimage.RuntimeInitializedClassBuildItem;
import io.quarkus.runtime.configuration.ConfigurationException;
import io.quarkus.vertx.core.deployment.CoreVertxBuildItem;
import io.quarkus.vertx.graphql.runtime.VertxGraphqlRecorder;
diff --git a/extensions/vertx/deployment/src/main/java/io/quarkus/vertx/core/deployment/VertxCoreProcessor.java b/extensions/vertx/deployment/src/main/java/io/quarkus/vertx/core/deployment/VertxCoreProcessor.java
index 41acd5fe59f51..0feaf4c912316 100644
--- a/extensions/vertx/deployment/src/main/java/io/quarkus/vertx/core/deployment/VertxCoreProcessor.java
+++ b/extensions/vertx/deployment/src/main/java/io/quarkus/vertx/core/deployment/VertxCoreProcessor.java
@@ -220,6 +220,12 @@ IOThreadDetectorBuildItem ioThreadDetector(VertxCoreRecorder recorder) {
return new IOThreadDetectorBuildItem(recorder.detector());
}
+ @BuildStep
+ @Record(ExecutionTime.RUNTIME_INIT)
+ void configureLogging(VertxCoreRecorder recorder) {
+ recorder.configureQuarkusLoggerFactory();
+ }
+
@BuildStep
@Produce(ServiceStartBuildItem.class)
@Record(value = ExecutionTime.RUNTIME_INIT)
diff --git a/extensions/vertx/runtime/src/main/java/io/quarkus/vertx/core/runtime/VertxCoreRecorder.java b/extensions/vertx/runtime/src/main/java/io/quarkus/vertx/core/runtime/VertxCoreRecorder.java
index c28172f0e7567..e20e71618d897 100644
--- a/extensions/vertx/runtime/src/main/java/io/quarkus/vertx/core/runtime/VertxCoreRecorder.java
+++ b/extensions/vertx/runtime/src/main/java/io/quarkus/vertx/core/runtime/VertxCoreRecorder.java
@@ -69,6 +69,8 @@
@Recorder
public class VertxCoreRecorder {
+ private static final String LOGGER_FACTORY_NAME_SYS_PROP = "vertx.logger-delegate-factory-class-name";
+
static {
System.setProperty("vertx.disableTCCL", "true");
}
@@ -666,6 +668,13 @@ public static Supplier recoverFailedStart(VertxConfiguration config, Thre
}
+ public void configureQuarkusLoggerFactory() {
+ String loggerClassName = System.getProperty(LOGGER_FACTORY_NAME_SYS_PROP);
+ if (loggerClassName == null) {
+ System.setProperty(LOGGER_FACTORY_NAME_SYS_PROP, VertxLogDelegateFactory.class.getName());
+ }
+ }
+
static class VertxSupplier implements Supplier<Vertx> {
final LaunchMode launchMode;
final VertxConfiguration config;
diff --git a/independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractSharedContext.java b/independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractSharedContext.java
index 3648dfce08890..752a4c385ab9e 100644
--- a/independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractSharedContext.java
+++ b/independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractSharedContext.java
@@ -1,6 +1,9 @@
package io.quarkus.arc.impl;
-import java.util.*;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
diff --git a/independent-projects/extension-maven-plugin/src/main/java/io/quarkus/maven/ExtensionDescriptorMojo.java b/independent-projects/extension-maven-plugin/src/main/java/io/quarkus/maven/ExtensionDescriptorMojo.java
index 27acf1122cb66..ecdc6e4cbd800 100644
--- a/independent-projects/extension-maven-plugin/src/main/java/io/quarkus/maven/ExtensionDescriptorMojo.java
+++ b/independent-projects/extension-maven-plugin/src/main/java/io/quarkus/maven/ExtensionDescriptorMojo.java
@@ -8,7 +8,15 @@
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.maven.artifact.Artifact;
diff --git a/independent-projects/ide-config/src/main/resources/eclipse-format.xml b/independent-projects/ide-config/src/main/resources/eclipse-format.xml
index f6e4206e5f3f4..746da3f54f6af 100644
--- a/independent-projects/ide-config/src/main/resources/eclipse-format.xml
+++ b/independent-projects/ide-config/src/main/resources/eclipse-format.xml
@@ -1,322 +1,378 @@
diff --git a/independent-projects/tools/devtools-common/src/main/java/io/quarkus/platform/catalog/processor/CatalogProcessor.java b/independent-projects/tools/devtools-common/src/main/java/io/quarkus/platform/catalog/processor/CatalogProcessor.java
index ae0037e90f61b..a947cd87bd2d1 100644
--- a/independent-projects/tools/devtools-common/src/main/java/io/quarkus/platform/catalog/processor/CatalogProcessor.java
+++ b/independent-projects/tools/devtools-common/src/main/java/io/quarkus/platform/catalog/processor/CatalogProcessor.java
@@ -2,7 +2,12 @@
import static io.quarkus.platform.catalog.processor.ExtensionProcessor.isUnlisted;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
import java.util.stream.Collectors;
import io.quarkus.registry.catalog.Category;
diff --git a/integration-tests/gradle/src/main/resources/basic-java-library-module/application/src/main/java/org/acme/ApplicationConfigResource.java b/integration-tests/gradle/src/main/resources/basic-java-library-module/application/src/main/java/org/acme/ApplicationConfigResource.java
index 82145f3a5c3d7..95910560675f7 100644
--- a/integration-tests/gradle/src/main/resources/basic-java-library-module/application/src/main/java/org/acme/ApplicationConfigResource.java
+++ b/integration-tests/gradle/src/main/resources/basic-java-library-module/application/src/main/java/org/acme/ApplicationConfigResource.java
@@ -1,13 +1,12 @@
package org.acme;
-
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
@Path("/app-config")
public class ApplicationConfigResource {
diff --git a/integration-tests/gradle/src/main/resources/custom-filesystem-provider/application/src/test/java/org/acme/ExampleResourceTest.java b/integration-tests/gradle/src/main/resources/custom-filesystem-provider/application/src/test/java/org/acme/ExampleResourceTest.java
index 6b2e243fe8f1d..d8b87fa230f9b 100644
--- a/integration-tests/gradle/src/main/resources/custom-filesystem-provider/application/src/test/java/org/acme/ExampleResourceTest.java
+++ b/integration-tests/gradle/src/main/resources/custom-filesystem-provider/application/src/test/java/org/acme/ExampleResourceTest.java
@@ -1,12 +1,12 @@
package org.acme;
+import static io.restassured.RestAssured.given;
+import static org.hamcrest.CoreMatchers.is;
-import io.quarkus.test.common.QuarkusTestResource;
-import io.quarkus.test.junit.QuarkusTest;
import org.junit.jupiter.api.Test;
-import static io.restassured.RestAssured.given;
-import static org.hamcrest.CoreMatchers.is;
+import io.quarkus.test.common.QuarkusTestResource;
+import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
@QuarkusTestResource(TestResource.class)
@@ -20,4 +20,4 @@ public void testHelloEndpoint() {
.statusCode(200)
.body(is("hello"));
}
-}
\ No newline at end of file
+}
diff --git a/integration-tests/kafka-oauth-keycloak/src/test/java/io/quarkus/it/kafka/KafkaKeycloakTestResource.java b/integration-tests/kafka-oauth-keycloak/src/test/java/io/quarkus/it/kafka/KafkaKeycloakTestResource.java
index a1f6c02497b70..ba3b08c697c1d 100644
--- a/integration-tests/kafka-oauth-keycloak/src/test/java/io/quarkus/it/kafka/KafkaKeycloakTestResource.java
+++ b/integration-tests/kafka-oauth-keycloak/src/test/java/io/quarkus/it/kafka/KafkaKeycloakTestResource.java
@@ -1,12 +1,11 @@
package io.quarkus.it.kafka;
import static io.strimzi.test.container.StrimziKafkaContainer.KAFKA_PORT;
+import static java.util.Map.entry;
import java.util.HashMap;
import java.util.Map;
-import org.testcontainers.utility.MountableFile;
-
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
import io.quarkus.test.keycloak.client.KeycloakTestClient;
import io.quarkus.test.keycloak.server.KeycloakContainer;
@@ -31,14 +30,24 @@ public Map<String, String> start() {
//Start kafka container
this.kafka = new StrimziKafkaContainer("quay.io/strimzi/kafka:latest-kafka-3.7.0")
.withBrokerId(1)
- .withKafkaConfigurationMap(Map.of("listener.security.protocol.map",
- "JWT:SASL_PLAINTEXT,BROKER1:PLAINTEXT",
- "listener.name.jwt.oauthbearer.sasl.jaas.config",
- getOauthSaslJaasConfig(keycloak.getInternalUrl(), keycloak.getServerUrl()),
- "listener.name.jwt.plain.sasl.jaas.config",
- getPlainSaslJaasConfig(keycloak.getInternalUrl(), keycloak.getServerUrl())))
+ .withKafkaConfigurationMap(Map.ofEntries(
+ entry("listener.security.protocol.map", "JWT:SASL_PLAINTEXT,BROKER1:PLAINTEXT"),
+ entry("listener.name.jwt.oauthbearer.sasl.jaas.config",
+ getOauthSaslJaasConfig(keycloak.getInternalUrl(), keycloak.getServerUrl())),
+ entry("listener.name.jwt.plain.sasl.jaas.config",
+ getPlainSaslJaasConfig(keycloak.getInternalUrl(), keycloak.getServerUrl())),
+ entry("sasl.enabled.mechanisms", "OAUTHBEARER"),
+ entry("sasl.mechanism.inter.broker.protocol", "OAUTHBEARER"),
+ entry("oauth.username.claim", "preferred_username"),
+ entry("principal.builder.class", "io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder"),
+ entry("listener.name.jwt.sasl.enabled.mechanisms", "OAUTHBEARER,PLAIN"),
+ entry("listener.name.jwt.oauthbearer.sasl.server.callback.handler.class",
+ "io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler"),
+ entry("listener.name.jwt.oauthbearer.sasl.login.callback.handler.class",
+ "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler"),
+ entry("listener.name.jwt.plain.sasl.server.callback.handler.class",
+ "io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler")))
.withNetworkAliases("kafka")
- .withServerProperties(MountableFile.forClasspathResource("kafkaServer.properties"))
.withBootstrapServers(
c -> String.format("JWT://%s:%s", c.getHost(), c.getMappedPort(KAFKA_PORT)));
this.kafka.start();
diff --git a/integration-tests/kafka-oauth-keycloak/src/test/resources/kafkaServer.properties b/integration-tests/kafka-oauth-keycloak/src/test/resources/kafkaServer.properties
deleted file mode 100644
index d148fcc18242f..0000000000000
--- a/integration-tests/kafka-oauth-keycloak/src/test/resources/kafkaServer.properties
+++ /dev/null
@@ -1,164 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=1
-
-############################# Socket Server Settings #############################
-
-# The address the socket server listens on. It will get the value returned from
-# java.net.InetAddress.getCanonicalHostName() if not configured.
-# FORMAT:
-# listeners = listener_name://host_name:port
-# EXAMPLE:
-# listeners = PLAINTEXT://your.host.name:9092
-#listeners=PLAINTEXT://:9092
-listeners=JWT://:9092
-#advertised.listeners=SASL_PLAINTEXT://localhost:9092
-
-
-
-# Hostname and port the broker will advertise to producers and consumers. If not set,
-# it uses the value for "listeners" if configured. Otherwise, it will use the value
-# returned from java.net.InetAddress.getCanonicalHostName().
-#advertised.listeners=PLAINTEXT://your.host.name:9092
-#advertised.listeners=SASL_PLAINTEXT://localhost:9092
-
-# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
-#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
-listener.security.protocol.map=JWT:SASL_PLAINTEXT
-
-
-# The number of threads that the server uses for receiving requests from the network and sending responses to the network
-num.network.threads=3
-
-# The number of threads that the server uses for processing requests, which may include disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-inter.broker.listener.name=JWT
-
-
-#### SASL ####
-
-sasl.enabled.mechanisms=OAUTHBEARER
-
-sasl.mechanism.inter.broker.protocol=OAUTHBEARER
-
-oauth.username.claim=preferred_username
-principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
-
-listener.name.jwt.sasl.enabled.mechanisms=OAUTHBEARER,PLAIN
-listener.name.jwt.oauthbearer.sasl.jaas.config=set_by_test
-
-listener.name.jwt.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
-listener.name.jwt.oauthbearer.sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler
-#listener.name.jwt.plain.sasl.jaas.config=set_by_test
-
-listener.name.jwt.plain.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=/tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Internal Topic Settings #############################
-# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
-# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offsets.topic.replication.factor=1
-transaction.state.log.replication.factor=1
-transaction.state.log.min.isr=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-# 1. Durability: Unflushed data may be lost if you are not using replication.
-# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion due to age
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
-# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=45000
-
-
-############################# Group Coordinator Settings #############################
-
-# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
-# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
-# The default value for this is 3 seconds.
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
-# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-group.initial.rebalance.delay.ms=0
\ No newline at end of file
diff --git a/integration-tests/kafka-sasl-elytron/src/test/java/io/quarkus/it/kafka/KafkaSaslTestResource.java b/integration-tests/kafka-sasl-elytron/src/test/java/io/quarkus/it/kafka/KafkaSaslTestResource.java
index f496581ecaaa5..4535e9eae893a 100644
--- a/integration-tests/kafka-sasl-elytron/src/test/java/io/quarkus/it/kafka/KafkaSaslTestResource.java
+++ b/integration-tests/kafka-sasl-elytron/src/test/java/io/quarkus/it/kafka/KafkaSaslTestResource.java
@@ -1,6 +1,7 @@
package io.quarkus.it.kafka;
import static io.strimzi.test.container.StrimziKafkaContainer.KAFKA_PORT;
+import static java.util.Map.entry;
import java.util.HashMap;
import java.util.Map;
@@ -34,10 +35,23 @@ public Map<String, String> start() {
//Start kafka container
kafka = new StrimziKafkaContainer()
+ .withBrokerId(0)
.withBootstrapServers(
c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getMappedPort(KAFKA_PORT)))
+ .withKafkaConfigurationMap(Map.ofEntries(
+ entry("listener.security.protocol.map", "SASL_PLAINTEXT:SASL_PLAINTEXT,BROKER1:PLAINTEXT"),
+ entry("inter.broker.listener.name", "SASL_PLAINTEXT"),
+ entry("sasl.enabled.mechanisms", "GSSAPI"),
+ entry("sasl.mechanism.inter.broker.protocol", "GSSAPI"),
+ entry("listener.name.sasl_plaintext.gssapi.sasl.jaas.config",
+ "com.sun.security.auth.module.Krb5LoginModule required " +
+ "useKeyTab=true storeKey=true debug=true serviceName=\"kafka\" " +
+ "keyTab=\"/opt/kafka/config/kafkabroker.keytab\" " +
+ "principal=\"kafka/localhost@EXAMPLE.COM\";"),
+ entry("sasl.kerberos.service.name", "kafka"),
+ entry("ssl.endpoint.identification.algorithm", "https"),
+ entry("ssl.client.auth", "none")))
.withPort(KAFKA_PORT)
- .withServerProperties(MountableFile.forClasspathResource("kafkaServer.properties"))
.withCopyFileToContainer(MountableFile.forClasspathResource("krb5KafkaBroker.conf"),
"/etc/krb5.conf")
.withCopyFileToContainer(MountableFile.forHostPath("target/kafkabroker.keytab"),
diff --git a/integration-tests/kafka-sasl-elytron/src/test/resources/kafkaServer.properties b/integration-tests/kafka-sasl-elytron/src/test/resources/kafkaServer.properties
deleted file mode 100644
index c10c56cc57ba2..0000000000000
--- a/integration-tests/kafka-sasl-elytron/src/test/resources/kafkaServer.properties
+++ /dev/null
@@ -1,172 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-############################# Socket Server Settings #############################
-
-# The address the socket server listens on. It will get the value returned from
-# java.net.InetAddress.getCanonicalHostName() if not configured.
-# FORMAT:
-# listeners = listener_name://host_name:port
-# EXAMPLE:
-# listeners = PLAINTEXT://your.host.name:9092
-#listeners=PLAINTEXT://:9092
-listeners=SASL_PLAINTEXT://:9092
-#advertised.listeners=SASL_PLAINTEXT://localhost:9092
-
-
-
-# Hostname and port the broker will advertise to producers and consumers. If not set,
-# it uses the value for "listeners" if configured. Otherwise, it will use the value
-# returned from java.net.InetAddress.getCanonicalHostName().
-#advertised.listeners=PLAINTEXT://your.host.name:9092
-#advertised.listeners=SASL_PLAINTEXT://localhost:9092
-
-# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
-#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
-listener.security.protocol.map=SASL_PLAINTEXT:SASL_PLAINTEXT
-
-
-# The number of threads that the server uses for receiving requests from the network and sending responses to the network
-num.network.threads=3
-
-# The number of threads that the server uses for processing requests, which may include disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-inter.broker.listener.name=SASL_PLAINTEXT
-
-
-#### SASL ####
-
-sasl.enabled.mechanisms=GSSAPI
-
-sasl.mechanism.inter.broker.protocol=GSSAPI
-
-#listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
-# username="broker" \
-# password="broker-secret" \
-# user_broker="broker-secret" \
-# user_client="client-secret";
-
-listener.name.sasl_plaintext.gssapi.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
- useKeyTab=true \
- storeKey=true \
- debug=true \
- serviceName="kafka" \
- keyTab="/opt/kafka/config/kafkabroker.keytab" \
- principal="kafka/localhost@EXAMPLE.COM";
-
-sasl.kerberos.service.name=kafka
-
-#ssl.endpoint.identification.algortigm=https://localhost
-ssl.endpoint.identification.algorithm=https
-ssl.client.auth=none
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=/tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Internal Topic Settings #############################
-# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
-# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offsets.topic.replication.factor=1
-transaction.state.log.replication.factor=1
-transaction.state.log.min.isr=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-# 1. Durability: Unflushed data may be lost if you are not using replication.
-# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion due to age
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
-# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=45000
-
-
-############################# Group Coordinator Settings #############################
-
-# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
-# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
-# The default value for this is 3 seconds.
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
-# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-group.initial.rebalance.delay.ms=0
\ No newline at end of file
diff --git a/integration-tests/kafka-sasl/src/test/java/io/quarkus/it/kafka/KafkaSASLTestResource.java b/integration-tests/kafka-sasl/src/test/java/io/quarkus/it/kafka/KafkaSASLTestResource.java
index 8b3ffa5464990..cfbd669b7c0a4 100644
--- a/integration-tests/kafka-sasl/src/test/java/io/quarkus/it/kafka/KafkaSASLTestResource.java
+++ b/integration-tests/kafka-sasl/src/test/java/io/quarkus/it/kafka/KafkaSASLTestResource.java
@@ -1,26 +1,40 @@
package io.quarkus.it.kafka;
import static io.strimzi.test.container.StrimziKafkaContainer.KAFKA_PORT;
+import static java.util.Map.entry;
import java.util.HashMap;
import java.util.Map;
-import org.testcontainers.utility.MountableFile;
-
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
import io.strimzi.test.container.StrimziKafkaContainer;
public class KafkaSASLTestResource implements QuarkusTestResourceLifecycleManager {
private final StrimziKafkaContainer kafka = new StrimziKafkaContainer()
- .withServerProperties(MountableFile.forClasspathResource("server.properties"))
- .withBootstrapServers(
- container -> String.format("SASL_PLAINTEXT://%s:%s", container.getHost(),
- container.getMappedPort(KAFKA_PORT)));
+ .withBrokerId(0)
+ .withBootstrapServers(c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(),
+ c.getMappedPort(KAFKA_PORT)))
+ .withKafkaConfigurationMap(Map.ofEntries(
+ entry("listener.security.protocol.map",
+ "SASL_PLAINTEXT:SASL_PLAINTEXT,BROKER1:PLAINTEXT,PLAINTEXT:PLAINTEXT"),
+ entry("sasl.enabled.mechanisms", "PLAIN"),
+ entry("sasl.mechanism.inter.broker.protocol", "PLAIN"),
+ entry("listener.name.sasl_plaintext.plain.sasl.jaas.config",
+ "org.apache.kafka.common.security.plain.PlainLoginModule required " +
+ "username=\"broker\" " +
+ "password=\"broker-secret\" " +
+ "user_broker=\"broker-secret\" " +
+ "user_client=\"client-secret\";")));
@Override
public Map<String, String> start() {
kafka.start();
+ try {
+ Thread.sleep(3000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
// Used by the test
System.setProperty("bootstrap.servers", kafka.getBootstrapServers());
// Used by the application
diff --git a/integration-tests/kafka-sasl/src/test/resources/server.properties b/integration-tests/kafka-sasl/src/test/resources/server.properties
deleted file mode 100644
index c7ee8c2298c00..0000000000000
--- a/integration-tests/kafka-sasl/src/test/resources/server.properties
+++ /dev/null
@@ -1,157 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-############################# Socket Server Settings #############################
-
-# The address the socket server listens on. It will get the value returned from
-# java.net.InetAddress.getCanonicalHostName() if not configured.
-# FORMAT:
-# listeners = listener_name://host_name:port
-# EXAMPLE:
-# listeners = PLAINTEXT://your.host.name:9092
-#listeners=PLAINTEXT://:9092
-listeners=SASL_PLAINTEXT://:9092
-
-
-
-# Hostname and port the broker will advertise to producers and consumers. If not set,
-# it uses the value for "listeners" if configured. Otherwise, it will use the value
-# returned from java.net.InetAddress.getCanonicalHostName().
-#advertised.listeners=PLAINTEXT://your.host.name:9092
-#advertised.listeners=SASL_PLAINTEXT://localhost:9092
-
-# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
-#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
-listener.security.protocol.map=SASL_PLAINTEXT:SASL_PLAINTEXT
-
-# The number of threads that the server uses for receiving requests from the network and sending responses to the network
-num.network.threads=3
-
-# The number of threads that the server uses for processing requests, which may include disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-inter.broker.listener.name=SASL_PLAINTEXT
-
-
-#### SASL ####
-
-sasl.enabled.mechanisms=PLAIN
-
-sasl.mechanism.inter.broker.protocol=PLAIN
-
-listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
- username="broker" \
- password="broker-secret" \
- user_broker="broker-secret" \
- user_client="client-secret";
-
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=/tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Internal Topic Settings #############################
-# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
-# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offsets.topic.replication.factor=1
-transaction.state.log.replication.factor=1
-transaction.state.log.min.isr=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-# 1. Durability: Unflushed data may be lost if you are not using replication.
-# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion due to age
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
-# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=45000
-
-
-############################# Group Coordinator Settings #############################
-
-# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
-# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
-# The default value for this is 3 seconds.
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
-# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-group.initial.rebalance.delay.ms=0
\ No newline at end of file
diff --git a/integration-tests/kafka-ssl/src/test/java/io/quarkus/it/kafka/KafkaSSLTestResource.java b/integration-tests/kafka-ssl/src/test/java/io/quarkus/it/kafka/KafkaSSLTestResource.java
index 24b6a1ca7dff6..3aa2aff9d5f19 100644
--- a/integration-tests/kafka-ssl/src/test/java/io/quarkus/it/kafka/KafkaSSLTestResource.java
+++ b/integration-tests/kafka-ssl/src/test/java/io/quarkus/it/kafka/KafkaSSLTestResource.java
@@ -1,6 +1,7 @@
package io.quarkus.it.kafka;
import static io.strimzi.test.container.StrimziKafkaContainer.KAFKA_PORT;
+import static java.util.Map.entry;
import java.io.File;
import java.util.HashMap;
@@ -13,9 +14,21 @@
public class KafkaSSLTestResource implements QuarkusTestResourceLifecycleManager {
+ Map<String, String> conf = new HashMap<>();
+
private final StrimziKafkaContainer kafka = new StrimziKafkaContainer()
.withBootstrapServers(c -> String.format("SSL://%s:%s", c.getHost(), c.getMappedPort(KAFKA_PORT)))
- .withServerProperties(MountableFile.forClasspathResource("server.properties"))
+ .withBrokerId(0)
+ .withKafkaConfigurationMap(Map.ofEntries(
+ entry("ssl.keystore.location", "/opt/kafka/config/kafka-keystore.p12"),
+ entry("ssl.keystore.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.keystore.type", "PKCS12"),
+ entry("ssl.key.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.truststore.location", "/opt/kafka/config/kafka-truststore.p12"),
+ entry("ssl.truststore.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.truststore.type", "PKCS12"),
+ entry("ssl.endpoint.identification.algorithm", ""),
+ entry("listener.security.protocol.map", "BROKER1:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL")))
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/kafka-keystore.p12"),
"/opt/kafka/config/kafka-keystore.p12")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/kafka-truststore.p12"),
diff --git a/integration-tests/kafka-ssl/src/test/resources/server.properties b/integration-tests/kafka-ssl/src/test/resources/server.properties
deleted file mode 100644
index 349085d8dbf99..0000000000000
--- a/integration-tests/kafka-ssl/src/test/resources/server.properties
+++ /dev/null
@@ -1,154 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-############################# Socket Server Settings #############################
-
-# The address the socket server listens on. It will get the value returned from
-# java.net.InetAddress.getCanonicalHostName() if not configured.
-# FORMAT:
-# listeners = listener_name://host_name:port
-# EXAMPLE:
-# listeners = PLAINTEXT://your.host.name:9092
-#listeners=PLAINTEXT://:9092
-listeners=SSL://:9092
-
-
-
-# Hostname and port the broker will advertise to producers and consumers. If not set,
-# it uses the value for "listeners" if configured. Otherwise, it will use the value
-# returned from java.net.InetAddress.getCanonicalHostName().
-#advertised.listeners=PLAINTEXT://your.host.name:9092
-advertised.listeners=SSL://:9092
-
-# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
-#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
-listener.security.protocol.map=SSL:SSL
-
-# The number of threads that the server uses for receiving requests from the network and sending responses to the network
-num.network.threads=3
-
-# The number of threads that the server uses for processing requests, which may include disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-inter.broker.listener.name=SSL
-
-#### SSL ####
-
-ssl.keystore.location=/opt/kafka/config/kafka-keystore.p12
-ssl.keystore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.keystore.type=PKCS12
-ssl.key.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.truststore.location=/opt/kafka/config/kafka-truststore.p12
-ssl.truststore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.truststore.type=PKCS12
-ssl.endpoint.identification.algorithm=
-
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=/tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Internal Topic Settings #############################
-# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
-# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offsets.topic.replication.factor=1
-transaction.state.log.replication.factor=1
-transaction.state.log.min.isr=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-# 1. Durability: Unflushed data may be lost if you are not using replication.
-# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion due to age
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
-# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=45000
-
-
-############################# Group Coordinator Settings #############################
-
-# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
-# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
-# The default value for this is 3 seconds.
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
-# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-group.initial.rebalance.delay.ms=0
\ No newline at end of file
diff --git a/integration-tests/kafka-streams/src/test/java/io/quarkus/it/kafka/streams/KafkaSSLTestResource.java b/integration-tests/kafka-streams/src/test/java/io/quarkus/it/kafka/streams/KafkaSSLTestResource.java
index 7f62dcce37865..b51e74d3a8a31 100644
--- a/integration-tests/kafka-streams/src/test/java/io/quarkus/it/kafka/streams/KafkaSSLTestResource.java
+++ b/integration-tests/kafka-streams/src/test/java/io/quarkus/it/kafka/streams/KafkaSSLTestResource.java
@@ -1,6 +1,7 @@
package io.quarkus.it.kafka.streams;
import static io.strimzi.test.container.StrimziKafkaContainer.KAFKA_PORT;
+import static java.util.Map.entry;
import java.io.File;
import java.util.HashMap;
@@ -14,7 +15,16 @@
public class KafkaSSLTestResource implements QuarkusTestResourceLifecycleManager {
private static final StrimziKafkaContainer kafka = new StrimziKafkaContainer()
- .withServerProperties(MountableFile.forClasspathResource("server.properties"))
+ .withKafkaConfigurationMap(Map.ofEntries(
+ entry("ssl.keystore.location", "/opt/kafka/config/kafka-keystore.p12"),
+ entry("ssl.keystore.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.keystore.type", "PKCS12"),
+ entry("ssl.key.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.truststore.location", "/opt/kafka/config/kafka-truststore.p12"),
+ entry("ssl.truststore.password", "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L"),
+ entry("ssl.truststore.type", "PKCS12"),
+ entry("ssl.endpoint.identification.algorithm=", ""),
+ entry("listener.security.protocol.map", "BROKER1:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL")))
.withBootstrapServers(c -> String.format("SSL://%s:%s", c.getHost(), c.getMappedPort(KAFKA_PORT)))
.withCopyFileToContainer(MountableFile.forClasspathResource("ks-keystore.p12"),
"/opt/kafka/config/kafka-keystore.p12")
diff --git a/integration-tests/kafka-streams/src/test/resources/server.properties b/integration-tests/kafka-streams/src/test/resources/server.properties
deleted file mode 100644
index 349085d8dbf99..0000000000000
--- a/integration-tests/kafka-streams/src/test/resources/server.properties
+++ /dev/null
@@ -1,154 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-############################# Socket Server Settings #############################
-
-# The address the socket server listens on. It will get the value returned from
-# java.net.InetAddress.getCanonicalHostName() if not configured.
-# FORMAT:
-# listeners = listener_name://host_name:port
-# EXAMPLE:
-# listeners = PLAINTEXT://your.host.name:9092
-#listeners=PLAINTEXT://:9092
-listeners=SSL://:9092
-
-
-
-# Hostname and port the broker will advertise to producers and consumers. If not set,
-# it uses the value for "listeners" if configured. Otherwise, it will use the value
-# returned from java.net.InetAddress.getCanonicalHostName().
-#advertised.listeners=PLAINTEXT://your.host.name:9092
-advertised.listeners=SSL://:9092
-
-# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
-#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
-listener.security.protocol.map=SSL:SSL
-
-# The number of threads that the server uses for receiving requests from the network and sending responses to the network
-num.network.threads=3
-
-# The number of threads that the server uses for processing requests, which may include disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-inter.broker.listener.name=SSL
-
-#### SSL ####
-
-ssl.keystore.location=/opt/kafka/config/kafka-keystore.p12
-ssl.keystore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.keystore.type=PKCS12
-ssl.key.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.truststore.location=/opt/kafka/config/kafka-truststore.p12
-ssl.truststore.password=Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L
-ssl.truststore.type=PKCS12
-ssl.endpoint.identification.algorithm=
-
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=/tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Internal Topic Settings #############################
-# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
-# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offsets.topic.replication.factor=1
-transaction.state.log.replication.factor=1
-transaction.state.log.min.isr=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-# 1. Durability: Unflushed data may be lost if you are not using replication.
-# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion due to age
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
-# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=45000
-
-
-############################# Group Coordinator Settings #############################
-
-# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
-# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
-# The default value for this is 3 seconds.
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
-# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
-group.initial.rebalance.delay.ms=0
\ No newline at end of file
diff --git a/integration-tests/maven/src/test/resources-filtered/projects/project-with-extension/runner/src/main/java/org/acme/HelloResource.java b/integration-tests/maven/src/test/resources-filtered/projects/project-with-extension/runner/src/main/java/org/acme/HelloResource.java
index e4bb827f2625a..8aa6dfb55aedd 100644
--- a/integration-tests/maven/src/test/resources-filtered/projects/project-with-extension/runner/src/main/java/org/acme/HelloResource.java
+++ b/integration-tests/maven/src/test/resources-filtered/projects/project-with-extension/runner/src/main/java/org/acme/HelloResource.java
@@ -1,14 +1,13 @@
package org.acme;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-
import jakarta.inject.Inject;
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
@Path("/hello")
public class HelloResource {
diff --git a/integration-tests/oidc-dev-services/pom.xml b/integration-tests/oidc-dev-services/pom.xml
index eace1af7f7741..50458d925f246 100644
--- a/integration-tests/oidc-dev-services/pom.xml
+++ b/integration-tests/oidc-dev-services/pom.xml
@@ -44,6 +44,11 @@
+ <dependency>
+ <groupId>io.quarkus</groupId>
+ <artifactId>quarkus-test-oidc-server</artifactId>
+ <scope>test</scope>
+ </dependency>
<groupId>io.quarkus</groupId>
diff --git a/integration-tests/oidc-dev-services/src/main/resources/application.properties b/integration-tests/oidc-dev-services/src/main/resources/application.properties
index 636d87caec1ef..02f7a3cbb7aa3 100644
--- a/integration-tests/oidc-dev-services/src/main/resources/application.properties
+++ b/integration-tests/oidc-dev-services/src/main/resources/application.properties
@@ -1,3 +1,6 @@
quarkus.oidc.devservices.enabled=true
+quarkus.oidc.devservices.roles.Ronald=admin
+%code-flow.quarkus.oidc.devservices.roles.alice=admin,user
+%code-flow.quarkus.oidc.devservices.roles.bob=user
%code-flow.quarkus.oidc.application-type=web-app
diff --git a/integration-tests/oidc-dev-services/src/test/java/io/quarkus/it/oidc/dev/services/BearerAuthenticationOidcDevServicesTest.java b/integration-tests/oidc-dev-services/src/test/java/io/quarkus/it/oidc/dev/services/BearerAuthenticationOidcDevServicesTest.java
index eac0592af5e07..623c51403e732 100644
--- a/integration-tests/oidc-dev-services/src/test/java/io/quarkus/it/oidc/dev/services/BearerAuthenticationOidcDevServicesTest.java
+++ b/integration-tests/oidc-dev-services/src/test/java/io/quarkus/it/oidc/dev/services/BearerAuthenticationOidcDevServicesTest.java
@@ -1,25 +1,34 @@
package io.quarkus.it.oidc.dev.services;
import org.hamcrest.Matchers;
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import io.quarkus.test.junit.QuarkusTest;
+import io.quarkus.test.oidc.client.OidcTestClient;
import io.restassured.RestAssured;
@QuarkusTest
public class BearerAuthenticationOidcDevServicesTest {
+ static final OidcTestClient oidcTestClient = new OidcTestClient();
+
+ @AfterAll
+ public static void close() {
+ oidcTestClient.close();
+ }
+
@Test
public void testLoginAsCustomUser() {
RestAssured.given()
- .auth().oauth2(getAccessToken("Ronald", "admin"))
+ .auth().oauth2(getAccessToken("Ronald"))
.get("/secured/admin-only")
.then()
.statusCode(200)
.body(Matchers.containsString("Ronald"))
.body(Matchers.containsString("admin"));
RestAssured.given()
- .auth().oauth2(getAccessToken("Ronald", "admin"))
+ .auth().oauth2(getAccessToken("Ronald"))
.get("/secured/user-only")
.then()
.statusCode(403);
@@ -62,16 +71,6 @@ public void testLoginAsBob() {
}
private String getAccessToken(String user) {
- return RestAssured.given().get(getAuthServerUrl() + "/testing/generate/access-token?user=" + user).asString();
- }
-
- private String getAccessToken(String user, String... roles) {
- return RestAssured.given()
- .get(getAuthServerUrl() + "/testing/generate/access-token?user=" + user + "&roles=" + String.join(",", roles))
- .asString();
- }
-
- private static String getAuthServerUrl() {
- return RestAssured.get("/secured/auth-server-url").then().statusCode(200).extract().body().asString();
+ return oidcTestClient.getAccessToken(user, user);
}
}
diff --git a/integration-tests/oidc-mtls/src/test/java/io/quarkus/it/oidc/OidcMtlsTest.java b/integration-tests/oidc-mtls/src/test/java/io/quarkus/it/oidc/OidcMtlsTest.java
index 0529704d89240..0f3371019ff86 100644
--- a/integration-tests/oidc-mtls/src/test/java/io/quarkus/it/oidc/OidcMtlsTest.java
+++ b/integration-tests/oidc-mtls/src/test/java/io/quarkus/it/oidc/OidcMtlsTest.java
@@ -10,6 +10,8 @@
import java.nio.file.Path;
import java.nio.file.Paths;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import io.quarkus.oidc.common.runtime.OidcConstants;
@@ -37,96 +39,94 @@ public class OidcMtlsTest {
@TestHTTPResource(tls = true)
URL url;
- @Test
- public void testMtlsJwt() throws Exception {
- Vertx vertx = Vertx.vertx();
- try {
- WebClientOptions options = createWebClientOptions();
- WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
-
- // HTTP 200
- HttpResponse resp = webClient.get("/service/mtls-jwt")
- .putHeader("Authorization",
- OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
- .send().await()
- .indefinitely();
- assertEquals(200, resp.statusCode());
- String name = resp.bodyAsString();
- assertEquals("Identities: CN=backend-service, alice;"
- + " Client: backend-service;"
- + " JWT cert thumbprint: true, introspection cert thumbprint: false", name);
-
- // HTTP 401, invalid token
- resp = webClient.get("/service/mtls-jwt")
- .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
- .send().await()
- .indefinitely();
- assertEquals(401, resp.statusCode());
- } finally {
+ private static Vertx vertx;
+
+ @BeforeAll
+ public static void createVertx() {
+ vertx = Vertx.vertx();
+ }
+
+ @AfterAll
+ public static void closeVertx() {
+ if (vertx != null) {
vertx.close();
}
}
+ @Test
+ public void testMtlsJwt() throws Exception {
+ WebClientOptions options = createWebClientOptions();
+ WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
+
+ // HTTP 200
+ HttpResponse resp = webClient.get("/service/mtls-jwt")
+ .putHeader("Authorization",
+ OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
+ .send().await()
+ .indefinitely();
+ assertEquals(200, resp.statusCode());
+ String name = resp.bodyAsString();
+ assertEquals("Identities: CN=backend-service, alice;"
+ + " Client: backend-service;"
+ + " JWT cert thumbprint: true, introspection cert thumbprint: false", name);
+
+ // HTTP 401, invalid token
+ resp = webClient.get("/service/mtls-jwt")
+ .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
+ .send().await()
+ .indefinitely();
+ assertEquals(401, resp.statusCode());
+ }
+
@Test
public void testMtlsIntrospection() throws Exception {
- Vertx vertx = Vertx.vertx();
- try {
- WebClientOptions options = createWebClientOptions();
- WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
-
- // HTTP 200
- HttpResponse<Buffer> resp = webClient.get("/service/mtls-introspection")
- .putHeader("Authorization",
- OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
- .send().await()
- .indefinitely();
- assertEquals(200, resp.statusCode());
- String name = resp.bodyAsString();
- assertEquals("Identities: CN=backend-service, alice;"
- + " Client: backend-service;"
- + " JWT cert thumbprint: false, introspection cert thumbprint: true", name);
-
- // HTTP 401, invalid token
- resp = webClient.get("/service/mtls-introspection")
- .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
- .send().await()
- .indefinitely();
- assertEquals(401, resp.statusCode());
- } finally {
- vertx.close();
- }
+ WebClientOptions options = createWebClientOptions();
+ WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
+
+ // HTTP 200
+ HttpResponse<Buffer> resp = webClient.get("/service/mtls-introspection")
+ .putHeader("Authorization",
+ OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
+ .send().await()
+ .indefinitely();
+ assertEquals(200, resp.statusCode());
+ String name = resp.bodyAsString();
+ assertEquals("Identities: CN=backend-service, alice;"
+ + " Client: backend-service;"
+ + " JWT cert thumbprint: false, introspection cert thumbprint: true", name);
+
+ // HTTP 401, invalid token
+ resp = webClient.get("/service/mtls-introspection")
+ .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
+ .send().await()
+ .indefinitely();
+ assertEquals(401, resp.statusCode());
}
@Test
public void testMtlsClientWithSecret() throws Exception {
- Vertx vertx = Vertx.vertx();
- try {
- WebClientOptions options = createWebClientOptions();
- WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
-
- String accessToken = getAccessToken("backend-client-with-secret", "secret", "alice");
- // HTTP 200
- HttpResponse<Buffer> resp = webClient.get("/service/mtls-client-with-secret")
- .putHeader("Authorization",
- OidcConstants.BEARER_SCHEME + " " + accessToken)
- .send().await()
- .indefinitely();
- assertEquals(200, resp.statusCode());
- String name = resp.bodyAsString();
- assertEquals("Identities: CN=backend-service, alice;"
- + " Client: backend-client-with-secret;"
- + " JWT cert thumbprint: false, introspection cert thumbprint: false", name);
-
- // HTTP 401, token is valid but it is not certificate bound
- resp = webClient.get("/service/mtls-jwt")
- .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + accessToken)
- .send().await()
- .indefinitely();
- assertEquals(401, resp.statusCode());
-
- } finally {
- vertx.close();
- }
+ WebClientOptions options = createWebClientOptions();
+ WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
+
+ String accessToken = getAccessToken("backend-client-with-secret", "secret", "alice");
+ // HTTP 200
+ HttpResponse<Buffer> resp = webClient.get("/service/mtls-client-with-secret")
+ .putHeader("Authorization",
+ OidcConstants.BEARER_SCHEME + " " + accessToken)
+ .send().await()
+ .indefinitely();
+ assertEquals(200, resp.statusCode());
+ String name = resp.bodyAsString();
+ assertEquals("Identities: CN=backend-service, alice;"
+ + " Client: backend-client-with-secret;"
+ + " JWT cert thumbprint: false, introspection cert thumbprint: false", name);
+
+ // HTTP 401, token is valid but it is not certificate bound
+ resp = webClient.get("/service/mtls-jwt")
+ .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + accessToken)
+ .send().await()
+ .indefinitely();
+ assertEquals(401, resp.statusCode());
}
private String getAccessToken(String clientName, String clientSecret, String userName) {
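
The same @BeforeAll/@AfterAll lifecycle applies to any test class that previously created and closed a Vert.x instance inside each test method; a minimal standalone sketch, with illustrative class and test names and the plain WebClient instead of the Mutiny wrapper to keep it self-contained:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import io.vertx.core.Vertx;
import io.vertx.ext.web.client.WebClient;

public class SharedVertxLifecycleExample {

    // One Vert.x instance shared by every test method in the class.
    private static Vertx vertx;

    @BeforeAll
    public static void createVertx() {
        vertx = Vertx.vertx();
    }

    @AfterAll
    public static void closeVertx() {
        // Close once, after the last test, instead of in a finally block per test.
        if (vertx != null) {
            vertx.close();
        }
    }

    @Test
    public void eachTestBuildsItsOwnClient() {
        // Clients are cheap to create; only the Vert.x instance is shared.
        WebClient client = WebClient.create(vertx);
        // ... issue requests and assert on the responses ...
        client.close();
    }
}
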
diff --git a/integration-tests/smallrye-graphql-client-keycloak/src/test/java/io/quarkus/it/smallrye/graphql/keycloak/KeycloakRealmResourceManager.java b/integration-tests/smallrye-graphql-client-keycloak/src/test/java/io/quarkus/it/smallrye/graphql/keycloak/KeycloakRealmResourceManager.java
index a527c265f76af..b14148a84e5bc 100644
--- a/integration-tests/smallrye-graphql-client-keycloak/src/test/java/io/quarkus/it/smallrye/graphql/keycloak/KeycloakRealmResourceManager.java
+++ b/integration-tests/smallrye-graphql-client-keycloak/src/test/java/io/quarkus/it/smallrye/graphql/keycloak/KeycloakRealmResourceManager.java
@@ -1,10 +1,19 @@
package io.quarkus.it.smallrye.graphql.keycloak;
import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.keycloak.representations.AccessTokenResponse;
-import org.keycloak.representations.idm.*;
+import org.keycloak.representations.idm.ClientRepresentation;
+import org.keycloak.representations.idm.CredentialRepresentation;
+import org.keycloak.representations.idm.RealmRepresentation;
+import org.keycloak.representations.idm.RoleRepresentation;
+import org.keycloak.representations.idm.RolesRepresentation;
+import org.keycloak.representations.idm.UserRepresentation;
import org.keycloak.util.JsonSerialization;
import org.testcontainers.containers.GenericContainer;
diff --git a/pom.xml b/pom.xml
index 05a9946f3aeea..d2e3142be02e7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -177,8 +177,27 @@
false
+
+            <plugin>
+                <groupId>org.ec4j.maven</groupId>
+                <artifactId>editorconfig-maven-plugin</artifactId>
+                <version>0.1.3</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>check</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.ec4j.maven</groupId>
+                <artifactId>editorconfig-maven-plugin</artifactId>
+            </plugin>
+
                <groupId>io.quarkus.bot</groupId>
diff --git a/tcks/microprofile-jwt/src/main/java/io/quarkus/tck/jwt/DeploymentPropertyConfigProvider.java b/tcks/microprofile-jwt/src/main/java/io/quarkus/tck/jwt/DeploymentPropertyConfigProvider.java
index 790b39d2ea735..2d7447a37f30a 100644
--- a/tcks/microprofile-jwt/src/main/java/io/quarkus/tck/jwt/DeploymentPropertyConfigProvider.java
+++ b/tcks/microprofile-jwt/src/main/java/io/quarkus/tck/jwt/DeploymentPropertyConfigProvider.java
@@ -3,7 +3,6 @@
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URL;
-import java.util.*;
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.ConfigSourceProvider;
diff --git a/test-framework/junit5/src/main/java/io/quarkus/test/junit/QuarkusMainTestExtension.java b/test-framework/junit5/src/main/java/io/quarkus/test/junit/QuarkusMainTestExtension.java
index 7975bd7b1b18d..dafc53877c83e 100644
--- a/test-framework/junit5/src/main/java/io/quarkus/test/junit/QuarkusMainTestExtension.java
+++ b/test-framework/junit5/src/main/java/io/quarkus/test/junit/QuarkusMainTestExtension.java
@@ -5,7 +5,12 @@
import java.io.Closeable;
import java.lang.reflect.Method;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.logging.Handler;
diff --git a/test-framework/mongodb/src/main/java/io/quarkus/test/mongodb/MongoReplicaSetTestResource.java b/test-framework/mongodb/src/main/java/io/quarkus/test/mongodb/MongoReplicaSetTestResource.java
index 169e84cae8b9b..04102dc3cf4dd 100644
--- a/test-framework/mongodb/src/main/java/io/quarkus/test/mongodb/MongoReplicaSetTestResource.java
+++ b/test-framework/mongodb/src/main/java/io/quarkus/test/mongodb/MongoReplicaSetTestResource.java
@@ -1,9 +1,15 @@
package io.quarkus.test.mongodb;
-import static org.awaitility.Durations.*;
+import static org.awaitility.Durations.ONE_MINUTE;
+import static org.awaitility.Durations.ONE_SECOND;
import java.net.UnknownHostException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
import org.awaitility.Awaitility;
import org.bson.Document;
diff --git a/test-framework/oidc-server/src/main/java/io/quarkus/test/oidc/client/OidcTestClient.java b/test-framework/oidc-server/src/main/java/io/quarkus/test/oidc/client/OidcTestClient.java
index d0964004442a3..a857600ab2b50 100644
--- a/test-framework/oidc-server/src/main/java/io/quarkus/test/oidc/client/OidcTestClient.java
+++ b/test-framework/oidc-server/src/main/java/io/quarkus/test/oidc/client/OidcTestClient.java
@@ -9,6 +9,8 @@
import org.eclipse.microprofile.config.ConfigProvider;
+import io.quarkus.arc.Arc;
+import io.quarkus.arc.ArcContainer;
import io.vertx.core.MultiMap;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
@@ -22,8 +24,8 @@ public class OidcTestClient {
private final static String CLIENT_ID_PROP = "quarkus.oidc.client-id";
private final static String CLIENT_SECRET_PROP = "quarkus.oidc.credentials.secret";
- Vertx vertx = Vertx.vertx();
- WebClient client = WebClient.create(vertx);
+ private volatile Vertx vertx = null;
+ private volatile WebClient client = null;
private String authServerUrl;
private String tokenUrl;
@@ -121,7 +123,7 @@ private String getAccessTokenInternal(MultiMap requestMap, Map<String, String> extraProps) {
requestMap = requestMap.addAll(extraProps);
}
- var result = client.postAbs(getTokenUrl())
+ var result = getClient().postAbs(getTokenUrl())
.putHeader("Content-Type", "application/x-www-form-urlencoded")
.sendBuffer(encodeForm(requestMap));
await().atMost(REQUEST_TIMEOUT).until(result::isComplete);
@@ -153,7 +155,7 @@ public String getAuthServerUrl() {
public String getTokenUrl() {
if (tokenUrl == null) {
getAuthServerUrl();
- var result = client.getAbs(authServerUrl + "/.well-known/openid-configuration")
+ var result = getClient().getAbs(authServerUrl + "/.well-known/openid-configuration")
.send();
await().atMost(REQUEST_TIMEOUT).until(result::isComplete);
tokenUrl = result.result().bodyAsJsonObject().getString("token_endpoint");
@@ -191,6 +193,27 @@ private static String urlEncode(String value) {
}
}
+ private WebClient getClient() {
+ if (client == null) {
+ client = WebClient.create(getVertx());
+ }
+ return client;
+ }
+
+ private Vertx getVertx() {
+ final ArcContainer container = Arc.container();
+ if (container != null && container.isRunning()) {
+ var managedVertx = container.instance(Vertx.class).orElse(null);
+ if (managedVertx != null) {
+ return managedVertx;
+ }
+ }
+ if (vertx == null) {
+ vertx = Vertx.vertx();
+ }
+ return vertx;
+ }
+
public void close() {
if (client != null) {
client.close();