diff --git a/backend/src/main/java/com/couchbase/admin/auth/controller/AuthController.java b/backend/src/main/java/com/couchbase/admin/auth/controller/AuthController.java index 3fe480a3..1b8597a1 100644 --- a/backend/src/main/java/com/couchbase/admin/auth/controller/AuthController.java +++ b/backend/src/main/java/com/couchbase/admin/auth/controller/AuthController.java @@ -31,6 +31,9 @@ import java.util.HashMap; import java.util.Map; +import java.util.UUID; + +import com.couchbase.admin.tokens.service.JwtTokenCacheService; /** * REST controller for Admin UI authentication @@ -60,6 +63,9 @@ public class AuthController { @Autowired private InitializationService initializationService; + @Autowired + private JwtTokenCacheService jwtTokenCacheService; + @Value("${app.security.use-keycloak:false}") private boolean useKeycloak; @@ -368,16 +374,28 @@ private String issueAdminAccessToken(String email, String[] scopes) { long hours = Long.parseLong(System.getProperty("oauth.token.expiry.hours", System.getenv().getOrDefault("OAUTH_TOKEN_EXPIRY_HOURS", "24"))); java.time.Instant exp = now.plus(java.time.Duration.ofHours(hours)); + String jti = UUID.randomUUID().toString(); JwtClaimsSet claims = JwtClaimsSet.builder() .subject(email) .issuedAt(now) .expiresAt(exp) - .id(java.util.UUID.randomUUID().toString()) // Add JTI for revocation tracking + .id(jti) // Add JTI for revocation tracking .claim("token_type", "admin") // Explicit token type (hardening) .claim("scope", String.join(" ", scopes)) .claim("email", email) .build(); - return jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); + + String token = jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); + + try { + if (jwtTokenCacheService != null) { + jwtTokenCacheService.addToken(jti); + } + } catch (Exception e) { + logger.warn("⚠️ [LOGIN] Failed to add JTI to JwtTokenCacheService: {}", e.getMessage()); + } + + return token; } catch (Exception e) { logger.error("❌ Failed to issue admin 
access token: {}", e.getMessage()); return null; diff --git a/backend/src/main/java/com/couchbase/admin/config/service/ConfigurationStartupService.java b/backend/src/main/java/com/couchbase/admin/config/service/ConfigurationStartupService.java index f8b6fd7a..d403f41c 100644 --- a/backend/src/main/java/com/couchbase/admin/config/service/ConfigurationStartupService.java +++ b/backend/src/main/java/com/couchbase/admin/config/service/ConfigurationStartupService.java @@ -47,6 +47,9 @@ public class ConfigurationStartupService { @Autowired private JwtTokenCacheService jwtTokenCacheService; + + @Autowired + private com.couchbase.admin.fhirBucket.service.FhirBucketService fhirBucketService; @Autowired(required = false) private com.couchbase.fhir.auth.AuthorizationServerConfig authorizationServerConfig; @@ -266,6 +269,17 @@ public boolean loadConfigurationAndConnect() { // Check initialization status for single-tenant "fhir" bucket checkAndReportInitializationStatus(); + // Ensure Admin collections exist (if missing) so admin seeding can proceed + try { + InitializationStatus status = initializationService.checkStatus(DEFAULT_CONNECTION_NAME); + if (status != null && !status.isAdminInitialized()) { + logger.info("ℹ️ Admin collections not present - ensuring Admin scope/collections now"); + fhirBucketService.ensureAdminCollections(DEFAULT_CONNECTION_NAME, initializationService.getFhirBucketName()); + } + } catch (Exception e) { + logger.debug("Could not ensure admin collections at startup: {}", e.getMessage()); + } + // Attempt to seed initial Admin user (from config.yaml) when appropriate seedAdminUserIfNeeded(); @@ -463,15 +477,15 @@ private void initializeTokenCache() { */ private void seedAdminUserIfNeeded() { try { - // Only attempt seeding when the FHIR bucket is initialized and ready. + // Only attempt seeding when Admin collections exist (we don't require full FHIR initialization). 
try { InitializationStatus status = initializationService.checkStatus(DEFAULT_CONNECTION_NAME); - if (status == null || status.getStatus() != InitializationStatus.Status.READY) { - logger.info("ℹ️ Skipping admin seeding: FHIR initialization status is not READY (status={})", status == null ? "null" : status.getStatus()); + if (status == null || !status.isAdminInitialized()) { + logger.info("ℹ️ Skipping admin seeding: Admin collections not present (status={})", status == null ? "null" : status.getStatus()); return; } } catch (Exception e) { - logger.warn("⚠️ Could not determine FHIR initialization status before seeding admin user: {}", e.getMessage()); + logger.warn("⚠️ Could not determine admin initialization status before seeding admin user: {}", e.getMessage()); logger.debug("Initialization status check error:", e); // If we cannot determine status, fail-safe: skip seeding to avoid exceptions during startup return; diff --git a/backend/src/main/java/com/couchbase/admin/fhirBucket/service/FhirBucketService.java b/backend/src/main/java/com/couchbase/admin/fhirBucket/service/FhirBucketService.java index 0c8536e9..74ffe8c5 100644 --- a/backend/src/main/java/com/couchbase/admin/fhirBucket/service/FhirBucketService.java +++ b/backend/src/main/java/com/couchbase/admin/fhirBucket/service/FhirBucketService.java @@ -1,220 +1,225 @@ package com.couchbase.admin.fhirBucket.service; -import com.couchbase.admin.fhirBucket.model.*; -import com.couchbase.admin.users.model.User; -import com.couchbase.admin.users.service.UserService; -import com.couchbase.common.config.AdminConfig; -import com.couchbase.admin.fhirBucket.config.FhirBucketProperties; import com.couchbase.admin.connections.service.ConnectionService; +import com.couchbase.admin.fhirBucket.config.FhirBucketProperties; +import com.couchbase.admin.fhirBucket.model.FhirBucketConfig; +import com.couchbase.admin.fhirBucket.model.FhirConversionRequest; +import com.couchbase.admin.fhirBucket.model.FhirConversionResponse; 
+import com.couchbase.admin.fhirBucket.model.FhirConversionStatus; +import com.couchbase.admin.fhirBucket.model.FhirConversionStatusDetail; import com.couchbase.admin.fts.config.FtsIndexCreator; import com.couchbase.admin.gsi.service.GsiIndexService; +import com.couchbase.admin.tokens.service.JwtTokenCacheService; +import com.couchbase.admin.users.model.User; +import com.couchbase.admin.users.service.UserService; import com.couchbase.client.java.Cluster; import com.couchbase.client.java.Collection; +import com.couchbase.client.java.http.CouchbaseHttpClient; +import com.couchbase.client.java.http.HttpPath; +import com.couchbase.client.java.http.HttpResponse; +import com.couchbase.client.java.http.HttpTarget; +import com.couchbase.client.java.json.JsonArray; import com.couchbase.client.java.json.JsonObject; -import com.couchbase.fhir.resources.service.FhirBucketConfigService; -import com.couchbase.fhir.resources.validation.FhirBucketValidator; import com.couchbase.client.java.manager.collection.CollectionManager; import com.couchbase.client.java.manager.collection.CollectionSpec; +import com.couchbase.client.java.query.QueryOptions; +import com.couchbase.common.config.AdminConfig; +import com.couchbase.fhir.resources.service.FhirBucketConfigService; +import com.couchbase.fhir.resources.validation.FhirBucketValidator; import com.nimbusds.jose.jwk.JWKSet; import com.nimbusds.jose.jwk.RSAKey; - -import com.couchbase.client.java.query.QueryOptions; -import java.time.Duration; -import java.time.Instant; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; 
-import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CompletableFuture; -import java.util.ArrayList; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; @Service public class FhirBucketService { - + private static final Logger logger = LoggerFactory.getLogger(FhirBucketService.class); - - @Autowired - private ConnectionService connectionService; - - @Autowired - private FhirBucketConfigService fhirBucketConfigService; - - @Autowired - private FhirBucketValidator fhirBucketValidator; - - @Autowired - private FhirBucketProperties fhirProperties; - - @Autowired - private FtsIndexCreator ftsIndexCreator; - - @Autowired - private GsiIndexService gsiIndexService; - - @Autowired - private UserService userService; - - @Autowired - private AdminConfig adminConfig; - - @Autowired(required = false) - private com.couchbase.fhir.auth.AuthorizationServerConfig authorizationServerConfig; - - @Autowired - private com.couchbase.admin.tokens.service.JwtTokenCacheService jwtTokenCacheService; - - // Store operation status + + @Autowired private ConnectionService connectionService; + @Autowired private FhirBucketConfigService fhirBucketConfigService; + @Autowired private FhirBucketValidator fhirBucketValidator; + @Autowired private FhirBucketProperties fhirProperties; + @Autowired private FtsIndexCreator ftsIndexCreator; + @Autowired private GsiIndexService gsiIndexService; + @Autowired private UserService userService; + @Autowired private AdminConfig adminConfig; + @Autowired(required = false) private com.couchbase.fhir.auth.AuthorizationServerConfig authorizationServerConfig; + @Autowired private JwtTokenCacheService jwtTokenCacheService; + private final Map operationStatus = new ConcurrentHashMap<>(); - - public FhirBucketService() { - // Configuration will be loaded via @Autowired dependency - } - - /** - * Start FHIR bucket conversion process with custom configuration - */ + + /** Start FHIR bucket conversion process with 
custom configuration. */ public FhirConversionResponse startConversion(String bucketName, String connectionName, FhirConversionRequest request) { String operationId = UUID.randomUUID().toString(); - - // Create status tracking FhirConversionStatusDetail statusDetail = new FhirConversionStatusDetail(operationId, bucketName); operationStatus.put(operationId, statusDetail); - - // Start async conversion with custom config CompletableFuture.runAsync(() -> performConversion(operationId, bucketName, connectionName, request)); - - return new FhirConversionResponse( - operationId, - bucketName, - FhirConversionStatus.INITIATED, - "FHIR bucket conversion started" - ); + return new FhirConversionResponse(operationId, bucketName, FhirConversionStatus.INITIATED, "FHIR bucket conversion started"); } - - /** - * Start FHIR bucket conversion process with default configuration - */ + + /** Start FHIR bucket conversion process with default configuration. */ public FhirConversionResponse startConversion(String bucketName, String connectionName) { return startConversion(bucketName, connectionName, null); } - - /** - * Get conversion status - */ + + /** Get conversion status. */ public FhirConversionStatusDetail getConversionStatus(String operationId) { return operationStatus.get(operationId); } - - /** - * Perform the actual conversion process with custom configuration - */ + + /** Perform the actual conversion process with custom configuration. 
*/ private void performConversion(String operationId, String bucketName, String connectionName, FhirConversionRequest request) { FhirConversionStatusDetail status = operationStatus.get(operationId); - try { status.setStatus(FhirConversionStatus.IN_PROGRESS); - - // Get connection by name + Cluster cluster = connectionService.getConnection(connectionName); if (cluster == null) { throw new IllegalStateException("No active Couchbase connection found for: " + connectionName); } - + CollectionManager collectionManager = cluster.bucket(bucketName).collections(); - - // Step 1: Create Admin scope - updateStatus(status, "create_admin_scope", "Creating Admin scope"); - createScope(collectionManager, "Admin"); - status.setCompletedSteps(1); - - // Step 2: Create Resources scope + updateStatus(status, "create_resources_scope", "Creating Resources scope"); createScope(collectionManager, "Resources"); - status.setCompletedSteps(2); - - // Step 3: Create Admin collections - updateStatus(status, "create_admin_collections", "Creating Admin collections"); - createCollections(collectionManager, "Admin", fhirProperties.getScopes().get("admin")); - status.setCompletedSteps(3); - - // Step 4: Create Resource collections + status.setCompletedSteps(1); + updateStatus(status, "create_resource_collections", "Creating Resource collections"); - createCollections(collectionManager, "Resources", fhirProperties.getScopes().get("resources")); - status.setCompletedSteps(4); - - // Step 5: Create primary indexes + FhirBucketProperties.ScopeConfiguration resourcesScope = getScope("resources"); + createCollections(collectionManager, "Resources", getCollectionNames(resourcesScope)); + status.setCompletedSteps(2); + updateStatus(status, "create_indexes", "Creating primary indexes for collections"); createIndexes(cluster, bucketName); - status.setCompletedSteps(5); - - // Step 6: Build deferred indexes + status.setCompletedSteps(3); + updateStatus(status, "build_deferred_indexes", "Building deferred 
indexes"); buildDeferredIndexes(cluster, bucketName); - status.setCompletedSteps(6); - - // Step 7: Create FTS indexes + status.setCompletedSteps(4); + updateStatus(status, "create_fts_indexes", "Creating FTS indexes for collections"); createFtsIndexes(connectionName, bucketName); - status.setCompletedSteps(7); - - // Step 8: Create GSI indexes (for Auth collections) + status.setCompletedSteps(5); + updateStatus(status, "create_gsi_indexes", "Creating GSI indexes from gsi-indexes.sql"); createGsiIndexes(cluster, bucketName); - status.setCompletedSteps(8); - - // Step 9: Mark as FHIR bucket with custom configuration + status.setCompletedSteps(6); + updateStatus(status, "mark_as_fhir", "Marking bucket as FHIR-enabled"); markAsFhirBucketWithConfig(bucketName, connectionName, request); - status.setCompletedSteps(9); - - // Step 10: Seed initial Admin user from config.yaml (if not already present) - updateStatus(status, "create_admin_user", "Creating initial Admin user"); - createInitialAdminUserIfNeeded(); - status.setCompletedSteps(10); - - // Step 11: Persist OAuth signing key (same key used since startup) + status.setCompletedSteps(7); + updateStatus(status, "persist_oauth_key", "Persisting OAuth signing key"); createOAuthSigningKey(cluster, bucketName); - status.setCompletedSteps(11); - - // Reload JWT token cache after initialization completes - // This ensures the cache is ready for token validation - logger.debug("🔐 Reloading JWT token cache after initialization..."); + status.setCompletedSteps(8); + + logger.debug("Reloading JWT token cache after initialization..."); try { jwtTokenCacheService.loadActiveTokens(); if (jwtTokenCacheService.isInitialized()) { int cacheSize = jwtTokenCacheService.getCacheSize(); - logger.debug("✅ Token cache reloaded with {} active tokens", cacheSize); + logger.debug("Token cache reloaded with {} active tokens", cacheSize); } else { - logger.debug("✅ Token cache initialized (no tokens yet)"); + logger.debug("Token cache initialized 
(no tokens yet)"); } } catch (Exception e) { - logger.warn("⚠️ Failed to reload token cache after initialization: {}", e.getMessage()); - // Don't fail the initialization if cache reload fails + logger.warn("Failed to reload token cache after initialization: {}", e.getMessage()); } - - // Completion + status.setStatus(FhirConversionStatus.COMPLETED); status.setCurrentStepDescription("FHIR bucket conversion completed successfully"); - // logger.info("FHIR conversion completed for bucket: {}", bucketName); - } catch (Exception e) { - // logger.error("FHIR conversion failed for bucket: {}", bucketName, e); status.setStatus(FhirConversionStatus.FAILED); status.setErrorMessage(e.getMessage()); status.setCurrentStepDescription("Conversion failed: " + e.getMessage()); } } - /** - * Create the initial Admin user based on config.yaml credentials, if it does not already exist. - * Uses local authentication so the Admin can later change their password via the Users UI. - */ + /** Ensure the Admin scope and required Admin collections exist. */ + public void ensureAdminCollections(String connectionName, String bucketName) { + Cluster cluster = connectionService.getConnection(connectionName); + if (cluster == null) { + logger.warn("No active connection found for: {}", connectionName); + return; + } + + try { + CollectionManager collectionManager = cluster.bucket(bucketName).collections(); + createScope(collectionManager, "Admin"); + createCollections(collectionManager, "Admin", getAdminCollections()); + } catch (Exception e) { + logger.warn("Failed to ensure Admin collections: {}", e.getMessage()); + logger.debug("Admin collection ensure error", e); + } + } + + /** Check whether Admin scope and required collections are already present. 
*/ + public boolean areAdminCollectionsPresent(String connectionName, String bucketName) { + Cluster cluster = connectionService.getConnection(connectionName); + if (cluster == null) { + return false; + } + + try { + CollectionManager collectionManager = cluster.bucket(bucketName).collections(); + var scopes = collectionManager.getAllScopes(); + var adminScopeOpt = scopes.stream().filter(scope -> "Admin".equals(scope.name())).findFirst(); + if (adminScopeOpt.isEmpty()) { + return false; + } + + Set expected = new HashSet<>(getAdminCollections()); + Set existing = adminScopeOpt.get().collections().stream() + .map(CollectionSpec::name) + .collect(Collectors.toSet()); + + return existing.containsAll(expected); + } catch (Exception e) { + logger.debug("Could not check Admin collections presence: {}", e.getMessage()); + return false; + } + } + + private FhirBucketProperties.ScopeConfiguration getScope(String name) { + Map scopes = fhirProperties.getScopes(); + return scopes != null ? scopes.get(name) : null; + } + + private List getCollectionNames(FhirBucketProperties.ScopeConfiguration scopeConfig) { + if (scopeConfig == null || scopeConfig.getCollections() == null) { + return List.of(); + } + return scopeConfig.getCollections().stream() + .map(FhirBucketProperties.CollectionConfiguration::getName) + .collect(Collectors.toList()); + } + + private List getAdminCollections() { + List configured = getCollectionNames(getScope("admin")); + if (!configured.isEmpty()) { + return configured; + } + return Arrays.asList("config", "users", "tokens", "clients", "cache", "bulk_groups"); + } + private void createInitialAdminUserIfNeeded() { try { String email = adminConfig.getEmail(); @@ -222,221 +227,151 @@ private void createInitialAdminUserIfNeeded() { String name = adminConfig.getName(); if (email == null || email.isEmpty()) { - logger.warn("⚠️ Skipping initial Admin user creation: admin.email is not configured"); + logger.warn("Skipping initial Admin user creation: admin.email 
is not configured"); return; } - // Check if user already exists if (userService.getUserById(email).isPresent()) { - logger.debug("👤 Initial Admin user '{}' already exists - skipping creation", email); + logger.debug("Initial Admin user '{}' already exists - skipping creation", email); return; } User adminUser = new User(); - adminUser.setId(email); // Use email as ID (consistent with Admin UI) + adminUser.setId(email); adminUser.setUsername(name != null ? name : "Administrator"); adminUser.setEmail(email); adminUser.setRole("admin"); adminUser.setAuthMethod("local"); - adminUser.setPasswordHash(password); // Will be hashed by UserService.createUser + adminUser.setPasswordHash(password); userService.createUser(adminUser, "system"); - logger.debug("✅ Initial Admin user '{}' created successfully in Admin.users collection", email); + logger.debug("Initial Admin user '{}' created successfully in Admin.users collection", email); } catch (Exception e) { - // Do not fail bucket initialization if seeding the Admin user fails - logger.error("❌ Failed to create initial Admin user from config.yaml: {}", e.getMessage()); + logger.error("Failed to create initial Admin user from config.yaml: {}", e.getMessage()); } } - + private void updateStatus(FhirConversionStatusDetail status, String stepName, String description) { status.setCurrentStep(stepName); status.setCurrentStepDescription(description); - // logger.info("Operation {}: {}", status.getOperationId(), description); } - + private void createScope(CollectionManager manager, String scopeName) throws Exception { try { manager.createScope(scopeName); - // logger.info("Created scope: {}", scopeName); } catch (Exception e) { - if (e.getMessage() != null && e.getMessage().contains("already exists")) { - // logger.warn("Scope {} already exists, skipping", scopeName); - } else { + if (e.getMessage() == null || !e.getMessage().contains("already exists")) { throw e; } } } - - private void createCollections(CollectionManager manager, 
String scopeName, - FhirBucketProperties.ScopeConfiguration scopeConfig) throws Exception { - // logger.info("Creating collections for scope: {}", scopeName); - for (FhirBucketProperties.CollectionConfiguration collection : scopeConfig.getCollections()) { + + private void createCollections(CollectionManager manager, String scopeName, List collections) throws Exception { + if (collections == null || collections.isEmpty()) return; + for (String coll : collections) { try { - // logger.info("Creating collection: {}.{}", scopeName, collection.getName()); - - // Check if collection has maxTTL configured - if (collection.getMaxTtlSeconds() != null && collection.getMaxTtlSeconds() > 0) { - // Create collection with maxTTL using CollectionSpec - Duration maxTtl = Duration.ofSeconds(collection.getMaxTtlSeconds()); - CollectionSpec spec = CollectionSpec.create(collection.getName(), scopeName, maxTtl); - manager.createCollection(spec); - logger.debug("✅ Created collection {}.{} with maxTTL: {}s", - scopeName, collection.getName(), collection.getMaxTtlSeconds()); - } else { - // Create collection without maxTTL (simple API) - manager.createCollection(scopeName, collection.getName()); - // logger.info("Successfully created collection: {}.{}", scopeName, collection.getName()); - } + manager.createCollection(CollectionSpec.create(coll, scopeName)); } catch (Exception e) { - if (e.getMessage() != null && e.getMessage().contains("already exists")) { - // logger.warn("Collection {}.{} already exists, skipping", scopeName, collection.getName()); - } else { - // logger.error("Failed to create collection {}.{}: {}", scopeName, collection.getName(), e.getMessage()); + if (e.getMessage() == null || !e.getMessage().contains("already exists")) { throw e; } } } } - + private void createIndexes(Cluster cluster, String bucketName) throws Exception { - // Create indexes for both scopes - for (Map.Entry scopeEntry : - fhirProperties.getScopes().entrySet()) { - - 
FhirBucketProperties.ScopeConfiguration scopeConfig = scopeEntry.getValue(); - - for (FhirBucketProperties.CollectionConfiguration collection : scopeConfig.getCollections()) { - // Skip if collection has no indexes defined - if (collection.getIndexes() == null || collection.getIndexes().isEmpty()) { + FhirBucketProperties.ScopeConfiguration resourcesScope = getScope("resources"); + if (resourcesScope == null || resourcesScope.getCollections() == null) { + return; + } + + for (FhirBucketProperties.CollectionConfiguration collection : resourcesScope.getCollections()) { + if (collection.getIndexes() == null || collection.getIndexes().isEmpty()) { + continue; + } + for (FhirBucketProperties.IndexConfiguration index : collection.getIndexes()) { + if (index.getSql() == null) { continue; } - - for (FhirBucketProperties.IndexConfiguration index : collection.getIndexes()) { - try { - // Add null check and debug logging - if (index.getSql() == null) { - // logger.error("SQL is null for index: {} in collection: {}.{}", - // index.getName(), scopeConfig.getName(), collection.getName()); - continue; // Skip this index - } - - String sql = index.getSql().replace("{bucket}", bucketName); - cluster.query(sql, QueryOptions.queryOptions().timeout(java.time.Duration.ofMinutes(5))); - // logger.info("Created index: {} for collection: {}.{}", - // index.getName(), scopeConfig.getName(), collection.getName()); - } catch (Exception e) { - if (e.getMessage() != null && e.getMessage().contains("already exists")) { - // logger.warn("Index {} already exists, skipping", index.getName()); - } else { - throw e; - } + try { + String sql = index.getSql().replace("{bucket}", bucketName); + cluster.query(sql, QueryOptions.queryOptions().timeout(Duration.ofMinutes(5))); + } catch (Exception e) { + if (e.getMessage() == null || !e.getMessage().contains("already exists")) { + throw e; } } } } } - + private void buildDeferredIndexes(Cluster cluster, String bucketName) throws Exception { - // Get the 
build commands from configuration List buildCommands = fhirProperties.getBuildCommands(); - - if (buildCommands != null && !buildCommands.isEmpty()) { - for (FhirBucketProperties.BuildCommand buildCommand : buildCommands) { - // Execute the query to get the BUILD INDEX statements - String query = buildCommand.getQuery().replace("{bucket}", bucketName); - + if (buildCommands == null || buildCommands.isEmpty()) { + return; + } + for (FhirBucketProperties.BuildCommand buildCommand : buildCommands) { + String query = buildCommand.getQuery().replace("{bucket}", bucketName); + var result = cluster.query(query); + for (String buildIndexSql : result.rowsAs(String.class)) { + buildIndexSql = buildIndexSql.replaceAll("^\"|\"$", ""); try { - var result = cluster.query(query); - - // Process each BUILD INDEX statement - use rowsAs() for raw string results - for (String buildIndexSql : result.rowsAs(String.class)) { - // Remove quotes if present - buildIndexSql = buildIndexSql.replaceAll("^\"|\"$", ""); - // logger.info("Executing BUILD INDEX: {}", buildIndexSql); - - try { - cluster.query(buildIndexSql); - // logger.info("Successfully built index"); - } catch (Exception e) { - logger.error("Failed to build index: {} - {}", buildIndexSql, e.getMessage()); - // Continue with other indexes even if one fails - } - } + cluster.query(buildIndexSql); } catch (Exception e) { - logger.error("Failed to execute build command query: {}", query, e); - throw e; + logger.error("Failed to build index: {} - {}", buildIndexSql, e.getMessage()); } } - } else { - // logger.info("No build commands found in configuration"); } } - + private void createFtsIndexes(String connectionName, String bucketName) throws Exception { try { - // Use the FtsIndexCreator to create all FTS indexes via REST API ftsIndexCreator.createAllFtsIndexesForBucket(connectionName, bucketName); } catch (Exception e) { - logger.error("❌ Failed to create FTS indexes for bucket: {}", bucketName, e); - // Don't throw the exception 
- FTS indexes are optional - // The bucket creation should continue even if FTS fails - logger.warn("⚠️ Continuing bucket creation without FTS indexes"); + logger.error("Failed to create FTS indexes for bucket: {}", bucketName, e); + logger.warn("Continuing bucket creation without FTS indexes"); } } - + private void createGsiIndexes(Cluster cluster, String bucketName) throws Exception { try { - // Use the GsiIndexService to create GSI indexes from gsi-indexes.sql gsiIndexService.createGsiIndexes(cluster, bucketName); } catch (Exception e) { - logger.error("❌ Failed to create GSI indexes for bucket: {}", bucketName, e); - // Don't throw the exception - continue with bucket creation - // Some GSI indexes might be critical but we'll let the system continue - logger.warn("⚠️ Continuing bucket creation - some GSI indexes may be missing"); + logger.error("Failed to create GSI indexes for bucket: {}", bucketName, e); + logger.warn("Continuing bucket creation - some GSI indexes may be missing"); } } - + private void markAsFhirBucketWithConfig(String bucketName, String connectionName, FhirConversionRequest request) throws Exception { - // Get connection to insert the FHIR configuration document Cluster cluster = connectionService.getConnection(connectionName); if (cluster == null) { throw new IllegalStateException("No active Couchbase connection found for: " + connectionName); } - - // Use custom configuration if provided, otherwise use defaults - FhirBucketConfig customConfig = (request != null) ? request.getFhirConfiguration() : null; - - // Create profile configuration - var profileConfig = com.couchbase.client.java.json.JsonArray.create(); + + FhirBucketConfig customConfig = request != null ? 
request.getFhirConfiguration() : null; + + JsonArray profileConfig = JsonArray.create(); if (customConfig != null && customConfig.getProfiles() != null && !customConfig.getProfiles().isEmpty()) { for (FhirBucketConfig.Profile profile : customConfig.getProfiles()) { - profileConfig.add(com.couchbase.client.java.json.JsonObject.create() + profileConfig.add(JsonObject.create() .put("profile", profile.getProfile() != null ? profile.getProfile() : "US Core") .put("version", profile.getVersion() != null ? profile.getVersion() : "6.1.0")); } } else { - // Default profile - profileConfig.add(com.couchbase.client.java.json.JsonObject.create() - .put("profile", "US Core") - .put("version", "6.1.0")); + profileConfig.add(JsonObject.create().put("profile", "US Core").put("version", "6.1.0")); } - - // Create validation configuration (simplified structure) - var validationConfig = com.couchbase.client.java.json.JsonObject.create(); + + JsonObject validationConfig = JsonObject.create(); if (customConfig != null && customConfig.getValidation() != null) { FhirBucketConfig.Validation validation = customConfig.getValidation(); validationConfig .put("mode", validation.getMode() != null ? validation.getMode() : "lenient") .put("profile", validation.getProfile() != null ? validation.getProfile() : "none"); } else { - // Default validation (simplified) - validationConfig - .put("mode", "lenient") - .put("profile", "none"); + validationConfig.put("mode", "lenient").put("profile", "none"); } - - // Create logs configuration - var logsConfig = com.couchbase.client.java.json.JsonObject.create(); + + JsonObject logsConfig = JsonObject.create(); if (customConfig != null && customConfig.getLogs() != null) { FhirBucketConfig.Logs logs = customConfig.getLogs(); logsConfig @@ -447,7 +382,6 @@ private void markAsFhirBucketWithConfig(String bucketName, String connectionName .put("number", logs.getNumber() > 0 ? logs.getNumber() : 30) .put("s3Endpoint", logs.getS3Endpoint() != null ? 
logs.getS3Endpoint() : ""); } else { - // Default logs logsConfig .put("enableSystem", false) .put("enableCRUDAudit", false) @@ -456,40 +390,29 @@ private void markAsFhirBucketWithConfig(String bucketName, String connectionName .put("number", 30) .put("s3Endpoint", ""); } - - // Create the comprehensive FHIR configuration document - // Format createdAt as human-readable: "November 14, 2025 at 10:30:45 PM PST" - java.time.ZonedDateTime now = java.time.ZonedDateTime.now(); - java.time.format.DateTimeFormatter formatter = java.time.format.DateTimeFormatter.ofPattern("MMMM dd, yyyy 'at' hh:mm:ss a z"); + + ZonedDateTime now = ZonedDateTime.now(); + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("MMMM dd, yyyy 'at' hh:mm:ss a z"); String createdAtFormatted = now.format(formatter); - - var fhirConfig = com.couchbase.client.java.json.JsonObject.create() + + JsonObject fhirConfig = JsonObject.create() .put("isFHIR", true) .put("createdAt", createdAtFormatted) .put("version", "1") .put("description", "FHIR-enabled bucket configuration") - .put("fhirRelease", customConfig != null && customConfig.getFhirRelease() != null ? - customConfig.getFhirRelease() : "Release 4") + .put("fhirRelease", customConfig != null && customConfig.getFhirRelease() != null ? 
customConfig.getFhirRelease() : "Release 4") .put("profiles", profileConfig) .put("validation", validationConfig) .put("logs", logsConfig); - - // Insert the document into Admin/config collection + String documentId = "fhir-config"; - String sql = String.format( - "INSERT INTO `%s`.`Admin`.`config` (KEY, VALUE) VALUES ('%s', %s)", - bucketName, documentId, fhirConfig.toString() - ); - + String sql = String.format("INSERT INTO `%s`.`Admin`.`config` (KEY, VALUE) VALUES ('%s', %s)", bucketName, documentId, fhirConfig.toString()); try { - cluster.query(sql); - // Clear both caches since we just updated the FHIR configuration + cluster.query(sql); fhirBucketConfigService.clearConfigCache(bucketName, connectionName); fhirBucketValidator.clearCache(bucketName, connectionName); - } catch (Exception e) { if (e.getMessage() != null && e.getMessage().contains("already exists")) { - // Even if it already exists, clear both caches in case it was updated fhirBucketConfigService.clearConfigCache(bucketName, connectionName); fhirBucketValidator.clearCache(bucketName, connectionName); } else { @@ -498,194 +421,122 @@ private void markAsFhirBucketWithConfig(String bucketName, String connectionName } } } - - /** - * Get all FHIR-enabled buckets - */ + + /** Get all FHIR-enabled buckets. 
*/ public List getFhirBuckets(String connectionName) { try { Cluster cluster = connectionService.getConnection(connectionName); if (cluster == null) { return new ArrayList<>(); } - - // Query all buckets and check which ones are FHIR-enabled + String sql = "SELECT name FROM system:buckets WHERE namespace_id = 'default'"; var result = cluster.query(sql); var buckets = new ArrayList(); - + for (var row : result.rowsAsObject()) { - String bucketName = row.getString("name"); - if (isFhirBucket(bucketName, connectionName)) { - buckets.add(bucketName); + String bucket = row.getString("name"); + if (isFhirBucket(bucket, connectionName)) { + buckets.add(bucket); } } - return buckets; } catch (Exception e) { logger.error("Failed to get FHIR buckets: {}", e.getMessage()); return new ArrayList<>(); } } - - /** - * Check if a bucket is FHIR-enabled by looking for the configuration document - * Uses REST API to avoid SDK retry issues - */ + + /** Check if a bucket is FHIR-enabled by looking for the configuration document via REST API. 
*/ public boolean isFhirBucket(String bucketName, String connectionName) { try { - // Get connection details from connection service String hostname = connectionService.getHostname(connectionName); int port = connectionService.getPort(connectionName); var connectionDetails = connectionService.getConnectionDetails(connectionName); - if (hostname == null || connectionDetails == null) { return false; } - - // Use REST API to check if fhir-config document exists - return checkFhirConfigViaRest(hostname, port, bucketName, connectionName, - connectionDetails.getUsername(), - connectionDetails.getPassword()); - + return checkFhirConfigViaRest(bucketName, connectionName); } catch (Exception e) { logger.error("Failed to check if bucket {} is FHIR-enabled: {}", bucketName, e.getMessage()); return false; } } - - /** - * Check FHIR config document via REST API using SDK's HTTP client - */ - private boolean checkFhirConfigViaRest(String hostname, int port, String bucketName, String connectionName, - String username, String password) { + + private boolean checkFhirConfigViaRest(String bucketName, String connectionName) { try { - // Get the cluster connection to access the HTTP client Cluster cluster = connectionService.getConnection(connectionName); if (cluster == null) { return false; } - - // Use SDK's HTTP client - com.couchbase.client.java.http.CouchbaseHttpClient httpClient = cluster.httpClient(); - - // Construct the REST path for the fhir-config document - com.couchbase.client.java.http.HttpResponse httpResponse = httpClient.get( - com.couchbase.client.java.http.HttpTarget.manager(), - com.couchbase.client.java.http.HttpPath.of( - "/pools/default/buckets/{}/scopes/Admin/collections/config/docs/fhir-config", - bucketName - ) + + CouchbaseHttpClient httpClient = cluster.httpClient(); + HttpResponse httpResponse = httpClient.get( + HttpTarget.manager(), + HttpPath.of("/pools/default/buckets/{}/scopes/Admin/collections/config/docs/fhir-config", bucketName) ); - + int 
statusCode = httpResponse.statusCode(); - - // If we get a 200, the document exists (FHIR-enabled) - if (statusCode == 200) { - return true; - } - - // 404 means document doesn't exist (not FHIR-enabled) - if (statusCode == 404) { - return false; - } - - // Other status codes are unexpected + if (statusCode == 200) return true; + if (statusCode == 404) return false; logger.error("Unexpected status code {} when checking FHIR config for bucket {}", statusCode, bucketName); return false; - } catch (Exception e) { - // Log the error but don't fail the check logger.error("REST check for FHIR config failed: {}", e.getMessage()); return false; } } - - /** - * Persist the current in-memory OAuth signing key to fhir.Admin.config collection - * Called during FHIR bucket initialization (Step 11) - * Uses the SAME key that was generated on startup for authentication - */ + + /** Persist the current in-memory OAuth signing key to fhir.Admin.config collection. */ private void createOAuthSigningKey(Cluster cluster, String bucketName) { try { Collection configCollection = cluster.bucket(bucketName).scope("Admin").collection("config"); - - // Check if key already exists try { configCollection.get("oauth-signing-key"); - logger.debug("🔐 OAuth signing key already exists in fhir.Admin.config"); + logger.debug("OAuth signing key already exists in fhir.Admin.config"); return; } catch (com.couchbase.client.core.error.DocumentNotFoundException e) { - // Key doesn't exist, persist it + // proceed + } + + if (authorizationServerConfig == null) { + throw new IllegalStateException("AuthorizationServerConfig not available to persist OAuth signing key"); } - - // Get the CURRENT in-memory key from AuthorizationServerConfig - // This is the same key used for all JWTs issued since startup + RSAKey rsaKey = authorizationServerConfig.getCurrentKey(); if (rsaKey == null) { throw new IllegalStateException("No OAuth signing key available to persist"); } - + String keyId = rsaKey.getKeyID(); - 
logger.debug("🔐 [STEP-11] Persisting in-memory OAuth signing key - hasPrivateKey: {}, kid: {}", rsaKey.isPrivate(), keyId); - - // Serialize the RSAKey directly (not via JWKSet) to ensure private parts are included String jwkJson = rsaKey.toJSONString(); - logger.debug("🔐 [STEP-11] Serialized RSAKey JSON length: {} chars", jwkJson.length()); - logger.debug("🔐 [STEP-11] RSAKey JSON (first 200 chars): {}", - jwkJson.length() > 200 ? jwkJson.substring(0, 200) + "..." : jwkJson); - - // Wrap in JWKSet format String jwkSetJson = String.format("{\"keys\":[%s]}", jwkJson); - logger.debug("🔐 [STEP-11] Complete JWKSet JSON length: {} chars", jwkSetJson.length()); - - // Verify it can be parsed back with private key - JWKSet testParse = JWKSet.parse(jwkSetJson); - RSAKey testKey = (RSAKey) testParse.getKeys().get(0); - logger.debug("🔐 [STEP-11] Verification after parse - hasPrivateKey: {}", testKey.isPrivate()); - - if (!testKey.isPrivate()) { - logger.error("❌ [STEP-11] BUG: Serialization lost private key! 
This should never happen."); - throw new IllegalStateException("JWKSet serialization lost private key"); - } - - // Create document - // IMPORTANT: Store jwkSet as raw string to preserve private key parts - // JsonObject.fromJson() can strip private fields during re-serialization + JsonObject doc = JsonObject.create() - .put("id", "oauth-signing-key") - .put("type", "jwk") - .put("jwkSetString", jwkSetJson) // Store as string - .put("createdAt", Instant.now().toString()) - .put("updatedAt", Instant.now().toString()); - + .put("id", "oauth-signing-key") + .put("type", "jwk") + .put("jwkSetString", jwkSetJson) + .put("createdAt", Instant.now().toString()) + .put("updatedAt", Instant.now().toString()); + configCollection.upsert("oauth-signing-key", doc); - logger.debug("✅ Persisted OAuth signing key to fhir.Admin.config (kid: {})", keyId); - logger.debug("🔐 All JWTs issued since startup remain valid - same key now persisted"); - - // Verify what was actually saved by reading it back + logger.debug("Persisted OAuth signing key to fhir.Admin.config (kid: {})", keyId); try { var savedDoc = configCollection.get("oauth-signing-key").contentAsObject(); String savedJwkStr = savedDoc.getString("jwkSetString"); - JWKSet verifySet = JWKSet.parse(savedJwkStr); - RSAKey verifyKey = (RSAKey) verifySet.getKeys().get(0); - logger.debug("🔐 [STEP-11] Verification after save - hasPrivateKey: {}", verifyKey.isPrivate()); - if (!verifyKey.isPrivate()) { - logger.error("❌ [STEP-11] Saved document lost private key!"); - } + JWKSet.parse(savedJwkStr); } catch (Exception e) { - logger.warn("⚠️ [STEP-11] Could not verify saved key: {}", e.getMessage()); + logger.warn("Could not verify saved key: {}", e.getMessage()); } - - // Invalidate the cached key in AuthorizationServerConfig so it reloads from Couchbase + if (authorizationServerConfig != null) { authorizationServerConfig.invalidateKeyCache(); } - + } catch (Exception e) { - // Don't fail initialization if key creation fails - 
logger.error("❌ Failed to create OAuth signing key: {}", e.getMessage(), e); - logger.warn("⚠️ OAuth tokens will use ephemeral key (won't survive restarts)"); + logger.error("Failed to create OAuth signing key: {}", e.getMessage(), e); + logger.warn("OAuth tokens will use ephemeral key (won't survive restarts)"); } } } + diff --git a/backend/src/main/java/com/couchbase/admin/initialization/model/InitializationStatus.java b/backend/src/main/java/com/couchbase/admin/initialization/model/InitializationStatus.java index 5db2474b..b39bd688 100644 --- a/backend/src/main/java/com/couchbase/admin/initialization/model/InitializationStatus.java +++ b/backend/src/main/java/com/couchbase/admin/initialization/model/InitializationStatus.java @@ -38,6 +38,7 @@ public enum Status { private boolean hasConnection; private boolean bucketExists; private boolean isFhirInitialized; + private boolean isAdminInitialized; public InitializationStatus() { } @@ -95,5 +96,13 @@ public boolean isFhirInitialized() { public void setFhirInitialized(boolean fhirInitialized) { this.isFhirInitialized = fhirInitialized; } + + public boolean isAdminInitialized() { + return isAdminInitialized; + } + + public void setAdminInitialized(boolean adminInitialized) { + this.isAdminInitialized = adminInitialized; + } } diff --git a/backend/src/main/java/com/couchbase/admin/initialization/service/InitializationService.java b/backend/src/main/java/com/couchbase/admin/initialization/service/InitializationService.java index dc45d82d..ce99946b 100644 --- a/backend/src/main/java/com/couchbase/admin/initialization/service/InitializationService.java +++ b/backend/src/main/java/com/couchbase/admin/initialization/service/InitializationService.java @@ -4,12 +4,18 @@ import com.couchbase.admin.initialization.model.InitializationStatus; import com.couchbase.admin.initialization.model.InitializationStatus.Status; import com.couchbase.client.java.Cluster; +import com.couchbase.client.java.manager.collection.CollectionSpec; 
import com.couchbase.fhir.auth.AuthorizationServerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + /** * Service to check FHIR system initialization status * In single-tenant mode, we expect exactly one bucket named "fhir" @@ -73,6 +79,9 @@ public InitializationStatus checkStatus(String connectionName) { } logger.debug("✅ Bucket '{}' exists", FHIR_BUCKET_NAME); + + boolean adminInitialized = checkAdminInitialization(cluster, FHIR_BUCKET_NAME); + status.setAdminInitialized(adminInitialized); // Step 3: Check if bucket is FHIR-initialized (has Admin.config.fhir-config document) boolean isFhirInitialized = checkFhirInitialization(cluster, FHIR_BUCKET_NAME, connectionName); @@ -103,6 +112,34 @@ public InitializationStatus checkStatus(String connectionName) { return status; } + + /** + * Check if the Admin scope and required collections exist. 
+ */ + public boolean checkAdminInitialization(Cluster cluster, String bucketName) { + try { + var collectionManager = cluster.bucket(bucketName).collections(); + var scopes = collectionManager.getAllScopes(); + + var adminScopeOpt = scopes.stream() + .filter(scope -> "Admin".equals(scope.name())) + .findFirst(); + + if (adminScopeOpt.isEmpty()) { + return false; + } + + Set expected = new HashSet<>(List.of("config", "users", "tokens", "clients", "cache", "bulk_groups")); + Set existing = adminScopeOpt.get().collections().stream() + .map(CollectionSpec::name) + .collect(Collectors.toSet()); + + return existing.containsAll(expected); + } catch (Exception e) { + logger.warn("⚠️ Failed to check Admin initialization for bucket '{}': {}", bucketName, e.getMessage()); + return false; + } + } /** * Check if a bucket exists in the cluster diff --git a/backend/src/main/java/com/couchbase/common/config/JwtAuthenticationConverterConfig.java b/backend/src/main/java/com/couchbase/common/config/JwtAuthenticationConverterConfig.java index b9df4585..e9721c23 100644 --- a/backend/src/main/java/com/couchbase/common/config/JwtAuthenticationConverterConfig.java +++ b/backend/src/main/java/com/couchbase/common/config/JwtAuthenticationConverterConfig.java @@ -1,6 +1,7 @@ package com.couchbase.common.config; import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter; @@ -14,7 +15,12 @@ public class JwtAuthenticationConverterConfig { @Bean - @ConditionalOnMissingBean + // Only create this bean when Keycloak IS in use (external auth) and no other bean + // with the same name exists. 
When embedded Authorization Server is enabled + // (app.security.use-keycloak=false), the embedded config supplies a converter + // and we must not register a second bean with the same name. + @ConditionalOnProperty(name = "app.security.use-keycloak", havingValue = "true") + @ConditionalOnMissingBean(name = "jwtAuthenticationConverter") public JwtAuthenticationConverter jwtAuthenticationConverter() { JwtGrantedAuthoritiesConverter grantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter(); // keep default authority prefix ("SCOPE_"), adjust if needed diff --git a/backend/src/main/resources/application.yml b/backend/src/main/resources/application.yml index 7e1c3921..6b6606df 100644 --- a/backend/src/main/resources/application.yml +++ b/backend/src/main/resources/application.yml @@ -3,25 +3,13 @@ spring: name: backend config: import: classpath:fhir.yml - security: - oauth2: - resourceserver: - jwt: - jwk-set-uri: http://keycloak:8080/auth/realms/fhir/protocol/openid-connect/certs - # Enable Virtual Threads (Java 21+) - # Eliminates thread pool exhaustion under high concurrency (1000+ concurrent users) - # Virtual threads are lightweight and yield during I/O operations (Couchbase queries, JSON parsing) - threads: - virtual: - enabled: true - -# OAuth2 Authorization Server Configuration -# Default value for local development -# Production deployments should set this in config.yaml (takes precedence at runtime) + app: - baseUrl: ${APP_BASE_URL:http://localhost} + baseUrl: ${APP_BASE_URL:http://localhost:8080/fhir} + security: - use-keycloak: true + use-keycloak: false + cors: allowed-origins: ${CORS_ALLOWED_ORIGINS:*} allowed-methods: ${CORS_ALLOWED_METHODS:GET,POST,PUT,DELETE,OPTIONS} @@ -64,12 +52,6 @@ server: tomcat: accesslog: enabled: false - # With virtual threads enabled, Tomcat thread pool sizing becomes less critical - # Virtual threads handle concurrency efficiently without traditional thread pool limits - # Legacy note: Set environment variables 
(SERVER_TOMCAT_THREADS_MAX, etc.) to override if needed - # Spring Boot defaults: 200 max threads, 10 min-spare, 100 accept-count, 10000 max-connections - -# Logging configuration for containers logging: level: '[ca.uhn.fhir.rest.server.RestfulServer]': false @@ -80,5 +62,4 @@ logging: '[org.springframework.web.servlet]': ERROR '[com.couchbase.transactions]': ERROR pattern: - console: "%d{HH:mm:ss.SSS} %-5level %logger{20} - %msg%n" -# The resource values above will be resolved from environment variables at runtime (docker-compose/.env) + console: '%d{HH:mm:ss.SSS} %-5level %logger{20} - %msg%n' diff --git a/docker-compose.user.yml b/docker-compose.user.yml new file mode 100644 index 00000000..2db05a4e --- /dev/null +++ b/docker-compose.user.yml @@ -0,0 +1,59 @@ +services: + fhir-server: + image: ghcr.io/couchbaselabs/couchbase-fhir-ce/fhir-server:${FHIR_BACKEND_TAG:-latest} + user: ${FHIR_RUN_UID:-1000}:${FHIR_RUN_GID:-1000} + mem_limit: 3g + mem_reservation: 2g + environment: + SPRING_PROFILES_ACTIVE: prod + DEPLOYED_ENV: container + LOGGING_FILE_NAME: /app/logs/fhir.log + SDK_LOGGING_FILE_NAME: /app/logs/sdk.log + APP_BASE_URL: http://example.com/fhir + JAVA_TOOL_OPTIONS: -Xms1g -Xmx2g -Xss512k -XX:MaxDirectMemorySize=512m -XX:+UseG1GC + -XX:MaxGCPauseMillis=200 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/app/logs/heap.hprof + -XX:+ExitOnOutOfMemoryError -Xlog:gc*:file=/app/logs/gc.log:time,uptime,level,tags:filecount=5,filesize=10M + volumes: + - ./config.yaml:/config.yaml:ro + - ./logs:/app/logs + healthcheck: + test: + - CMD-SHELL + - curl -f http://localhost:8080/actuator/health || exit 1 + interval: 30s + timeout: 5s + retries: 3 + start_period: 60s + expose: + - '8080' + logging: + driver: json-file + options: + max-size: 50m + max-file: '3' + restart: unless-stopped + fhir-admin: + image: ghcr.io/couchbaselabs/couchbase-fhir-ce/fhir-admin:${FHIR_FRONTEND_TAG:-latest} + environment: + NGINX_ENTRYPOINT_QUIET_LOGS: 1 + logging: + driver: json-file + 
options: + max-size: 10m + max-file: '1' + depends_on: + - fhir-server + expose: + - '80' + restart: unless-stopped + haproxy: + image: haproxy:2.9-alpine + ports: + - 80:80 + volumes: + - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro + depends_on: + - fhir-admin + - fhir-server + restart: unless-stopped +volumes: {} diff --git a/docs/README.md b/docs/README.md index fd9d01af..6da85add 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,7 +14,24 @@ couchbase-fhir-ce/ ├── frontend/ # React Admin UI ├── config.yaml # Application configuration ├── README.md # This file -└── PROJECT_GUIDE.md # Comprehensive development guide +# Couchbase FHIR CE + +Open source FHIR server and admin UI with Couchbase and Spring Boot with HAPI + +## Overview + +Couchbase FHIR CE is a comprehensive FHIR (Fast Healthcare Interoperability Resources) server implementation built on Couchbase with a modern React-based admin interface. This project provides a complete solution for healthcare data management and FHIR compliance. + +## Project Structure + +``` +couchbase-fhir-ce/ +├── backend/ # Spring Boot FHIR Server +├── frontend/ # React Admin UI +├── config.yaml # Application configuration (user-editable) +├── docker-compose.* # generated / templates +├── scripts/ # helpers (generate, keycloak helpers, etc.) +└── docs/ # documentation (this folder) ``` ## Quick Start @@ -25,14 +42,14 @@ couchbase-fhir-ce/ - Node.js 18+ - Couchbase Server 7.0+ or Couchbase Capella account -### Backend Setup +### Backend Setup (development) ```bash cd backend mvn spring-boot:run ``` -### Frontend Setup +### Frontend Setup (development) ```bash cd frontend @@ -40,6 +57,24 @@ npm install npm run dev ``` +### Generate runtime compose files + +This repository now generates `docker-compose.yml` and `haproxy.cfg` from your `config.yaml` using the generator helper. This is the recommended way to produce a local runtime config that matches your `config.yaml` settings. 
+ +```bash +# From project root +./scripts/generate.py ./config.yaml +# This writes/backs-up: docker-compose.yml and haproxy.cfg +``` + +After generation you can start services: + +```bash +docker compose up -d +``` + +See `scripts/generate.py` for details about what gets generated and which settings are honored. + ## Documentation For detailed information about: @@ -47,28 +82,41 @@ For detailed information about: - **Project Architecture**: See `PROJECT_GUIDE.md` - **Backend Architecture**: See `backend/ARCHITECTURE.md` - **Development Guidelines**: See `PROJECT_GUIDE.md` -- **Team Responsibilities**: See `PROJECT_GUIDE.md` ## Key Features - **FHIR R4 Compliance**: Full FHIR R4 resource support - **Couchbase Integration**: Native Couchbase data storage - **Admin UI**: Modern React-based management interface +- **SMART on FHIR**: OAuth2 / OpenID Connect support for SMART apps (see `docs/SMART_AND_KEYCLOAK.md`) - **Multi-tenant Support**: Tenant-based FHIR resource isolation - **Audit Logging**: Comprehensive audit trail - **Health Monitoring**: System health and metrics dashboard -## License +## SMART on FHIR and Keycloak + +SMART on FHIR support is included. You can use a third-party OIDC provider or optionally enable a Keycloak add-on we provide helper scripts for. The detailed guide lives at [docs/SMART_AND_KEYCLOAK.md](SMART_AND_KEYCLOAK.md) and covers: + +- Which `config.yaml` settings control Keycloak/SMART behavior +- How to enable Keycloak and write `.env` using `scripts/enable-keycloak.sh` +- How to seed Keycloak with clients, scopes and a test user using `scripts/keycloak/seed_keycloak.sh` +- How to generate `docker-compose.yml` and `haproxy.cfg` using `scripts/generate.py` -This project is licensed under the terms specified in the LICENSE file. +Note: by default the project uses the embedded Spring Authorization Service for OAuth/OIDC. Keycloak is an opt-in alternative for users who prefer to run a dedicated OIDC server. 
---- +If you want to enable Keycloak quickly, edit `config.yaml` (or `config.yaml.template`) and set the `keycloak` section, then run: -**For detailed development information, please refer to [PROJECT_GUIDE.md](./PROJECT_GUIDE.md)** +```bash +./scripts/enable-keycloak.sh ./config.yaml +./scripts/generate.py ./config.yaml +docker compose up -d +``` + +This will create/update `.env`, attempt to add Keycloak services to docker-compose files and write a realm import JSON under `scripts/keycloak/realm.json`. ## Docker Deployment -See [Docker-Deployment.md](./Docker-Deployment.md) for instructions on running this project with Docker and Docker Compose. +See [Docker-Deployment.md](./Docker-Deployment.md) for additional deployment options and production recommendations. ### Compose Files Explained @@ -76,12 +124,12 @@ There are TWO compose files with different purposes: | File | Purpose | Builds from source? | Used by installer? | | ------------------------- | ----------------------------------------------------------------------- | ------------------------- | -------------------------------------------------- | -| `docker-compose.yaml` | Local development (iterate on code, build images locally) | Yes (`build:` directives) | No | +| `docker-compose.yml` | Local development / runtime compose generated from `config.yaml` | Yes (may contain `build`) | No | | `docker-compose.user.yml` | Distribution template consumed by `install.sh` (pulls pre-built images) | No (uses `image:` tags) | Yes (downloaded and saved as `docker-compose.yml`) | -When a user runs the one‑liner: +When a user runs the one‑liner installer: -``` +```bash curl -sSL https://raw.githubusercontent.com/couchbaselabs/couchbase-fhir-ce/master/install.sh | bash -s -- ./config.yaml ``` @@ -89,15 +137,13 @@ The script fetches `docker-compose.user.yml` from GitHub and writes it locally a #### Keeping Them in Sync -If you make a functional change (environment variables, volumes, ports) to `docker-compose.yaml` that 
should also affect user deployments, manually port the relevant parts to `docker-compose.user.yml` and then: +If you change runtime environment variables, ports, volumes or service names and want those changes reflected in the user distribution, copy the relevant parts into `docker-compose.user.yml` and then: -1. Run the checksum helper to refresh hashes: - ```bash - ./scripts/update-checksums.sh - ``` -2. Commit the updated `docker-compose.user.yml` and `install.sh`. +```bash +./scripts/update-checksums.sh +``` -If you only change build-time details (e.g., adding a `build` arg), you typically do NOT need to update the user template. +Use `DRY_RUN=1` to preview or `SKIP_HAPROXY=1` if only the compose file changed. #### Customizing Runtime User @@ -112,26 +158,8 @@ This helps avoid permission issues for bind-mounted log directories. ### Updating Installer Hashes -The script `scripts/update-checksums.sh` recalculates SHA256 hashes for: - -- `docker-compose.user.yml` (distributed as `docker-compose.yml`) -- `haproxy.cfg` - -It then updates both checksum blocks in `install.sh` (Linux `sha256sum` and macOS `shasum` fallback). Use `DRY_RUN=1` to preview or `SKIP_HAPROXY=1` if only the compose file changed. - -Example: - -```bash -./scripts/update-checksums.sh # update both -SKIP_HAPROXY=1 ./scripts/update-checksums.sh -DRY_RUN=1 ./scripts/update-checksums.sh -``` - -If integrity verification fails for users, ensure: - -- They are retrieving the latest `install.sh`. -- The hashes in `install.sh` match the live raw GitHub contents of the downloaded files. +The script `scripts/update-checksums.sh` recalculates SHA256 hashes for the distribution artifacts and updates `install.sh` accordingly. ## Log Rotation & S3 Uploads -Log Rotation is enabled by default. 
~~Rotated logs can be configured to be uploaded to an S3 Bucket for complying with Audit requiremeents.~~ **Note: S3 upload functionality is currently disabled for the Beta release.** To learn more read [LOG_ROTATION_AND_S3_UPLOAD.md](./LOG_ROTATION_AND_S3_UPLOAD.md) +Log Rotation is enabled by default. Note: S3 upload functionality is currently disabled for the Beta release. To learn more read [LOG_ROTATION_AND_S3_UPLOAD.md](./LOG_ROTATION_AND_S3_UPLOAD.md) diff --git a/docs/SMART_AND_KEYCLOAK.md b/docs/SMART_AND_KEYCLOAK.md new file mode 100644 index 00000000..6dfa89ba --- /dev/null +++ b/docs/SMART_AND_KEYCLOAK.md @@ -0,0 +1,160 @@ +# SMART on FHIR and Keycloak — Quick Guide + +This document explains how to enable SMART on FHIR support for Couchbase FHIR CE, and how to use the optional Keycloak add-on to provide an OIDC provider for development and testing. + +Contents: +- Quick overview +- Config settings (`config.yaml`) +- Generate runtime compose files (`scripts/generate.py`) +- Enable Keycloak helper (`scripts/enable-keycloak.sh`) +- Seed Keycloak (`scripts/keycloak/seed_keycloak.sh`) +- Using an external OIDC provider + +## Quick overview + +- The backend supports OAuth2 / OpenID Connect for SMART on FHIR. The FHIR server validates incoming access tokens using a JWKS endpoint (JWKS URI). +- You can either point the server at an external OIDC provider (Auth0, Cognito, enterprise IdP) or run Keycloak locally using the included helpers. + +Note: by default the project uses the embedded Spring Authorization Service for OAuth/OIDC. The Keycloak helpers and configuration are optional — enable them only if you want to run Keycloak as your OIDC provider. + +## Relevant `config.yaml` settings + +Edit `config.yaml` (or `config.yaml.template`) to set your application base URL and Keycloak settings. 
Important fields: + +- `app.baseUrl` — public URL for your FHIR server (required by OAuth redirect config) +- `deploy.tls.enabled` — whether TLS/HTTPS is enabled for HAProxy (affects ports and cert mounting) + +Keycloak-specific section (optional): + +```yaml +keycloak: + enabled: true # enable Keycloak integration helpers (not required if using external OIDC) + realm: "fhir-realm" # realm name to create/import + adminUser: "admin" # Keycloak admin username (used by helper scripts) + adminPassword: "admin" # Keycloak admin password + clientId: "fhir-server" # OAuth client id for the FHIR server + clientSecret: "" # (optional) client secret; if empty a secret will be generated + url: "http://localhost:8080/auth" # Optional; set if Keycloak runs at non-default URL +``` + +Notes: +- `keycloak.enabled` toggles whether the helper scripts attempt to insert Keycloak into compose files and write `.env` entries. It does not force you to use Keycloak — you can leave this section out and use any OIDC provider. +- The generator (`scripts/generate.py`) uses `app.baseUrl` and `deploy.tls` to produce `docker-compose.yml` and `haproxy.cfg` that match your runtime choices. + +## Generate docker-compose and HAProxy config + +Before starting containers, generate the runtime compose file and haproxy config from your `config.yaml`: + +```bash +./scripts/generate.py ./config.yaml +# Output: docker-compose.yml and haproxy.cfg (existing files are backed up) +``` + +After generation, start the stack: + +```bash +docker compose up -d +``` + +If you enabled Keycloak in `config.yaml` and used `scripts/enable-keycloak.sh` (see below), `docker compose up -d keycloak` will launch Keycloak as an additional service. 
+ +## Enable Keycloak (helper) + +The repo includes `scripts/enable-keycloak.sh` which: + +- Parses `config.yaml` and writes Keycloak-related environment variables into `.env` +- Inserts a Keycloak service into local docker-compose files (best-effort) +- Derives and writes `KEYCLOAK_JWKS_URI` into `.env` +- Writes a small `scripts/keycloak/realm.json` helpful for import/seeding + +Usage: + +```bash +./scripts/enable-keycloak.sh ./config.yaml +``` + +This will: +- Create or update `.env` with `KEYCLOAK_*` vars +- Attempt to append a Keycloak service to existing `docker-compose*.yml` files (and create backups) +- Write `scripts/keycloak/realm.json` containing a basic client registration + +After running the enable helper, regenerate runtime files and start the stack: + +```bash +./scripts/generate.py ./config.yaml +docker compose up -d +``` + +### What the backend expects + +The backend reads the JWKS URI (Keycloak's `.../protocol/openid-connect/certs`) to validate tokens. The enable helper writes `KEYCLOAK_JWKS_URI` into `.env` so the backend can pick it up (via `application.yml` patching helper included in the script). + +If you run Keycloak on a different host/port, ensure `KEYCLOAK_URL` and `KEYCLOAK_REALM` are set in `.env` and that `KEYCLOAK_JWKS_URI` points to the provider's JWKS endpoint. 
+ +## Seed Keycloak (clients, scopes, test user) + +Once Keycloak is running, use the seeding helper to create a client, scopes and a test user: + +```bash +# Ensure .env is present (created by enable-keycloak.sh) then: +./scripts/keycloak/seed_keycloak.sh .env +``` + +This script will: +- Wait for Keycloak to be reachable +- Create the realm (if missing) +- Create/update the client (clientId is taken from `KEYCLOAK_CLIENT_ID`) +- Ensure a client secret exists (prints it if generated) +- Optionally create a test user `smart.user@example.com` / `password123` +- Create a set of SMART/FHIR-related client-scopes commonly used by the frontend + +After seeding, update `.env` with any generated `KEYCLOAK_CLIENT_SECRET` (the script prints it when created). + +## Using an external OIDC provider (no Keycloak) + +If you prefer to use a cloud or enterprise IdP, you don't need to enable the Keycloak helpers. Instead: + +1. Configure your IdP with a confidential client for the FHIR server. Set allowed redirect URIs (e.g. `http://localhost:8080/authorized`) and generate a client secret. +2. In your backend/hosting environment, set the equivalent of `FHIR_OIDC_JWKS_URI` (or `KEYCLOAK_JWKS_URI` if using the same env variable convention) to point to the provider's JWKS endpoint. +3. Provide the client id and secret to the frontend and backend as appropriate (via `.env` or `application.yml`). + +The minimum the server needs to validate tokens is the provider's JWKS URI and the expected issuer (the backend uses configured OIDC settings to validate token issuer/audience). + +## Common troubleshooting tips + +- If the backend rejects tokens, verify `KEYCLOAK_JWKS_URI` is reachable from the container and contains keys. +- If the frontend fails OAuth redirect, ensure `app.baseUrl` is correct and the IdP client redirect URIs include `http(s)://<your-host>/authorized`. +- Check `docker compose logs keycloak` and `docker compose logs fhir-server` for errors. 
+ +## Example minimal workflow (local dev) + +1. Edit `config.yaml` and set `app.baseUrl: "http://localhost:8080/fhir"` and `keycloak` block with `enabled: true` and admin credentials. +2. Run the enable helper: + +```bash +./scripts/enable-keycloak.sh ./config.yaml +``` + +3. Generate runtime compose files: + +```bash +./scripts/generate.py ./config.yaml +``` + +4. Start Keycloak and services: + +```bash +docker compose up -d keycloak fhir-server fhir-admin haproxy +``` + +5. Seed Keycloak: + +```bash +./scripts/keycloak/seed_keycloak.sh .env +``` + +6. Open the admin UI and test SMART flows with the client created by the seed script. + +--- + +If you need help adapting this guide to a specific cloud IdP or enterprise setup, open an issue or ask for assistance in the project chat/history.