diff --git a/.testcoverage.yml b/.testcoverage.yml index c05f4e3d..39020b51 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -6,6 +6,8 @@ exclude: - common/config/config.go - mocks - common/apis/* + - export_test.go + - export_test_integration.go # remove it later: - listener/reconciler/clusteraccess/subroutines.go - - listener/reconciler/singlecluster + diff --git a/cmd/gateway.go b/cmd/gateway.go index 301ef744..86556e2b 100644 --- a/cmd/gateway.go +++ b/cmd/gateway.go @@ -8,14 +8,13 @@ import ( "time" openmfpcontext "github.com/openmfp/golang-commons/context" + "github.com/openmfp/golang-commons/logger" "github.com/openmfp/golang-commons/sentry" "github.com/openmfp/golang-commons/traces" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" ctrl "sigs.k8s.io/controller-runtime" - "github.com/openmfp/golang-commons/logger" - "github.com/openmfp/kubernetes-graphql-gateway/gateway/manager" ) @@ -23,32 +22,26 @@ var gatewayCmd = &cobra.Command{ Use: "gateway", Short: "Run the GQL Gateway", Example: "go run main.go gateway", - RunE: func(_ *cobra.Command, _ []string) error { - log, err := setupLogger(defaultCfg.Log.Level) - if err != nil { - return fmt.Errorf("failed to setup logger: %w", err) - } - - log.Info().Str("LogLevel", log.GetLevel().String()).Msg("Starting gateway server...") + Run: func(_ *cobra.Command, _ []string) { + log.Info().Str("LogLevel", log.GetLevel().String()).Msg("Starting the Gateway...") ctx, _, shutdown := openmfpcontext.StartContext(log, appCfg, 1*time.Second) defer shutdown() if err := initializeSentry(ctx, log); err != nil { - return err + log.Fatal().Err(err).Msg("Failed to initialize Sentry") } ctrl.SetLogger(log.Logr()) gatewayInstance, err := manager.NewGateway(ctx, log, appCfg) if err != nil { - log.Error().Err(err).Msg("Error creating gateway") - return fmt.Errorf("failed to create gateway: %w", err) + log.Fatal().Err(err).Msg("Failed to create gateway") } tracingShutdown, err := 
initializeTracing(ctx, log) if err != nil { - return err + log.Fatal().Err(err).Msg("Failed to initialize tracing") } defer func() { if err := tracingShutdown(ctx); err != nil { @@ -56,7 +49,9 @@ var gatewayCmd = &cobra.Command{ } }() - return runServers(ctx, log, gatewayInstance) + if err := runServers(ctx, log, gatewayInstance); err != nil { + log.Fatal().Err(err).Msg("Failed to run servers") + } }, } @@ -71,7 +66,6 @@ func initializeSentry(ctx context.Context, log *logger.Logger) error { ) if err != nil { log.Fatal().Err(err).Msg("Sentry init failed") - return err } defer openmfpcontext.Recover(log) @@ -83,7 +77,6 @@ func initializeTracing(ctx context.Context, log *logger.Logger) (func(ctx contex shutdown, err := traces.InitProvider(ctx, defaultCfg.Tracing.Collector) if err != nil { log.Fatal().Err(err).Msg("unable to start gRPC-Sidecar TracerProvider") - return nil, err } return shutdown, nil } @@ -91,7 +84,6 @@ func initializeTracing(ctx context.Context, log *logger.Logger) (func(ctx contex shutdown, err := traces.InitLocalProvider(ctx, defaultCfg.Tracing.Collector, false) if err != nil { log.Fatal().Err(err).Msg("unable to start local TracerProvider") - return nil, err } return shutdown, nil } @@ -189,11 +181,3 @@ func runServers(ctx context.Context, log *logger.Logger, gatewayInstance http.Ha log.Info().Msg("Server shut down successfully") return nil } - -// setupLogger initializes the logger with the given log level -func setupLogger(logLevel string) (*logger.Logger, error) { - loggerCfg := logger.DefaultConfig() - loggerCfg.Name = "crdGateway" - loggerCfg.Level = logLevel - return logger.New(loggerCfg) -} diff --git a/cmd/listener.go b/cmd/listener.go index 862166fc..7a283117 100644 --- a/cmd/listener.go +++ b/cmd/listener.go @@ -3,7 +3,6 @@ package cmd import ( "context" "crypto/tls" - "os" kcpapis "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" kcpcore "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" @@ -21,6 +20,8 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/webhook" gatewayv1alpha1 "github.com/openmfp/kubernetes-graphql-gateway/common/apis/v1alpha1" + "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema" + "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/workspacefile" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler/clusteraccess" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler/kcp" @@ -74,6 +75,8 @@ var listenCmd = &cobra.Command{ } }, Run: func(cmd *cobra.Command, args []string) { + log.Info().Str("LogLevel", log.GetLevel().String()).Msg("Starting the Listener...") + ctx := ctrl.SetupSignalHandler() restCfg := ctrl.GetConfigOrDie() @@ -90,8 +93,7 @@ var listenCmd = &cobra.Command{ Scheme: scheme, }) if err != nil { - log.Error().Err(err).Msg("failed to create client from config") - os.Exit(1) + log.Fatal().Err(err).Msg("failed to create client from config") } reconcilerOpts := reconciler.ReconcilerOpts{ @@ -107,32 +109,34 @@ var listenCmd = &cobra.Command{ if appCfg.EnableKcp { kcpReconciler, err := kcp.NewKCPReconciler(appCfg, reconcilerOpts, log) if err != nil { - log.Error().Err(err).Msg("unable to create KCP reconciler") - os.Exit(1) + log.Fatal().Err(err).Msg("unable to create KCP reconciler") } // Start virtual workspace watching if path is configured if appCfg.Listener.VirtualWorkspacesConfigPath != "" { go func() { if err := kcpReconciler.StartVirtualWorkspaceWatching(ctx, appCfg.Listener.VirtualWorkspacesConfigPath); err != nil { - log.Error().Err(err).Msg("failed to start virtual workspace watching") - os.Exit(1) + log.Fatal().Err(err).Msg("failed to start virtual workspace watching") } }() } reconcilerInstance = kcpReconciler } else { - reconcilerInstance, err = clusteraccess.CreateMultiClusterReconciler(appCfg, reconcilerOpts, log) + ioHandler, err := workspacefile.NewIOHandler(appCfg.OpenApiDefinitionsPath) + if err != nil { + 
log.Fatal().Err(err).Msg("unable to create IO handler") + } + + reconcilerInstance, err = clusteraccess.NewClusterAccessReconciler(ctx, appCfg, reconcilerOpts, ioHandler, apischema.NewResolver(log), log) if err != nil { - log.Error().Err(err).Msg("unable to create cluster access reconciler") - os.Exit(1) + log.Fatal().Err(err).Msg("unable to create cluster access reconciler") } } // Setup reconciler with its own manager and start everything if err := startManagerWithReconciler(ctx, reconcilerInstance); err != nil { - os.Exit(1) + log.Fatal().Err(err).Msg("failed to start manager with reconciler") } }, } diff --git a/cmd/root.go b/cmd/root.go index 5f018184..3282f8ea 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -81,6 +81,14 @@ func initConfig() { v.SetDefault("gateway-url-graphql-suffix", "graphql") } +// setupLogger initializes the logger with the given log level +func setupLogger(logLevel string) (*logger.Logger, error) { + loggerCfg := logger.DefaultConfig() + loggerCfg.Name = "crdGateway" + loggerCfg.Level = logLevel + return logger.New(loggerCfg) +} + func Execute() { cobra.CheckErr(rootCmd.Execute()) } diff --git a/common/auth/metadata_injector.go b/common/auth/metadata_injector.go index 8766a403..ed7d5eeb 100644 --- a/common/auth/metadata_injector.go +++ b/common/auth/metadata_injector.go @@ -28,9 +28,23 @@ type MetadataInjectionConfig struct { HostOverride string // For virtual workspaces } +// MetadataInjector provides metadata injection services with structured logging +type MetadataInjector struct { + log *logger.Logger + client client.Client +} + +// NewMetadataInjector creates a new MetadataInjector service +func NewMetadataInjector(log *logger.Logger, client client.Client) *MetadataInjector { + return &MetadataInjector{ + log: log, + client: client, + } +} + // InjectClusterMetadata injects cluster metadata into schema JSON // This unified function handles both KCP and ClusterAccess use cases -func InjectClusterMetadata(ctx context.Context, schemaJSON 
[]byte, config MetadataInjectionConfig, k8sClient client.Client, log *logger.Logger) ([]byte, error) { +func (m *MetadataInjector) InjectClusterMetadata(ctx context.Context, schemaJSON []byte, config MetadataInjectionConfig) ([]byte, error) { // Parse the existing schema JSON var schemaData map[string]interface{} if err := json.Unmarshal(schemaJSON, &schemaData); err != nil { @@ -38,7 +52,7 @@ func InjectClusterMetadata(ctx context.Context, schemaJSON []byte, config Metada } // Determine the host to use - host := determineHost(config.Host, config.HostOverride, log) + host := m.determineHost(config.Host, config.HostOverride) // Create cluster metadata metadata := map[string]interface{}{ @@ -48,9 +62,9 @@ func InjectClusterMetadata(ctx context.Context, schemaJSON []byte, config Metada // Add auth data if configured if config.Auth != nil { - authMetadata, err := extractAuthDataForMetadata(ctx, config.Auth, k8sClient) + authMetadata, err := m.extractAuthDataForMetadata(ctx, config.Auth) if err != nil { - log.Warn().Err(err).Msg("failed to extract auth data for metadata") + m.log.Warn().Err(err).Msg("failed to extract auth data for metadata") } else if authMetadata != nil { metadata["auth"] = authMetadata } @@ -58,26 +72,26 @@ func InjectClusterMetadata(ctx context.Context, schemaJSON []byte, config Metada // Add CA data - prefer explicit CA config, fallback to kubeconfig CA if config.CA != nil { - caData, err := ExtractCAData(ctx, config.CA, k8sClient) + caData, err := ExtractCAData(ctx, config.CA, m.client) if err != nil { - log.Warn().Err(err).Msg("failed to extract CA data for metadata") + m.log.Warn().Err(err).Msg("failed to extract CA data for metadata") } else if caData != nil { metadata["ca"] = map[string]interface{}{ "data": base64.StdEncoding.EncodeToString(caData), } } } else if config.Auth != nil { - tryExtractKubeconfigCA(ctx, config.Auth, k8sClient, metadata, log) + m.tryExtractKubeconfigCA(ctx, config.Auth, metadata) } - return 
finalizeSchemaInjection(schemaData, metadata, host, config.Path, config.CA != nil || config.Auth != nil, log) + return m.finalizeSchemaInjection(schemaData, metadata, host, config.Path, config.CA != nil || config.Auth != nil) } // InjectKCPMetadataFromEnv injects KCP metadata using kubeconfig from environment // This is a convenience function for KCP use cases -func InjectKCPMetadataFromEnv(schemaJSON []byte, clusterPath string, log *logger.Logger, hostOverride ...string) ([]byte, error) { +func (m *MetadataInjector) InjectKCPMetadataFromEnv(schemaJSON []byte, clusterPath string, hostOverride ...string) ([]byte, error) { // Get kubeconfig from environment (same sources as ctrl.GetConfig()) - kubeconfigData, kubeconfigHost, err := extractKubeconfigFromEnv(log) + kubeconfigData, kubeconfigHost, err := m.extractKubeconfigFromEnv() if err != nil { return nil, fmt.Errorf("failed to extract kubeconfig data: %w", err) } @@ -95,7 +109,7 @@ func InjectKCPMetadataFromEnv(schemaJSON []byte, clusterPath string, log *logger } // Determine which host to use - host := determineKCPHost(kubeconfigHost, override, clusterPath, log) + host := m.determineKCPHost(kubeconfigHost, override, clusterPath) // Create cluster metadata with environment kubeconfig metadata := map[string]interface{}{ @@ -108,40 +122,40 @@ func InjectKCPMetadataFromEnv(schemaJSON []byte, clusterPath string, log *logger } // Extract CA data from kubeconfig if available - caData := extractCAFromKubeconfigData(kubeconfigData, log) + caData := m.extractCAFromKubeconfigData(kubeconfigData) if caData != nil { metadata["ca"] = map[string]interface{}{ "data": base64.StdEncoding.EncodeToString(caData), } } - return finalizeSchemaInjection(schemaData, metadata, host, clusterPath, caData != nil, log) + return m.finalizeSchemaInjection(schemaData, metadata, host, clusterPath, caData != nil) } // extractAuthDataForMetadata extracts auth data from AuthConfig for metadata injection -func extractAuthDataForMetadata(ctx 
context.Context, auth *gatewayv1alpha1.AuthConfig, k8sClient client.Client) (map[string]interface{}, error) { +func (m *MetadataInjector) extractAuthDataForMetadata(ctx context.Context, auth *gatewayv1alpha1.AuthConfig) (map[string]interface{}, error) { if auth == nil { return nil, nil } if auth.SecretRef != nil { - return extractTokenAuth(ctx, auth.SecretRef, k8sClient) + return m.extractTokenAuth(ctx, auth.SecretRef) } if auth.KubeconfigSecretRef != nil { - return extractKubeconfigAuth(ctx, auth.KubeconfigSecretRef, k8sClient) + return m.extractKubeconfigAuth(ctx, auth.KubeconfigSecretRef) } if auth.ClientCertificateRef != nil { - return extractClientCertAuth(ctx, auth.ClientCertificateRef, k8sClient) + return m.extractClientCertAuth(ctx, auth.ClientCertificateRef) } return nil, nil // No auth configured } // extractTokenAuth handles token-based authentication from SecretRef -func extractTokenAuth(ctx context.Context, secretRef *gatewayv1alpha1.SecretRef, k8sClient client.Client) (map[string]interface{}, error) { - secret, err := getSecret(ctx, secretRef.Name, secretRef.Namespace, k8sClient) +func (m *MetadataInjector) extractTokenAuth(ctx context.Context, secretRef *gatewayv1alpha1.SecretRef) (map[string]interface{}, error) { + secret, err := m.getSecret(ctx, secretRef.Name, secretRef.Namespace) if err != nil { return nil, fmt.Errorf("failed to get auth secret: %w", err) } @@ -158,8 +172,8 @@ func extractTokenAuth(ctx context.Context, secretRef *gatewayv1alpha1.SecretRef, } // extractKubeconfigAuth handles kubeconfig-based authentication from KubeconfigSecretRef -func extractKubeconfigAuth(ctx context.Context, kubeconfigRef *gatewayv1alpha1.KubeconfigSecretRef, k8sClient client.Client) (map[string]interface{}, error) { - secret, err := getSecret(ctx, kubeconfigRef.Name, kubeconfigRef.Namespace, k8sClient) +func (m *MetadataInjector) extractKubeconfigAuth(ctx context.Context, kubeconfigRef *gatewayv1alpha1.KubeconfigSecretRef) (map[string]interface{}, error) { + 
secret, err := m.getSecret(ctx, kubeconfigRef.Name, kubeconfigRef.Namespace) if err != nil { return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err) } @@ -176,8 +190,8 @@ func extractKubeconfigAuth(ctx context.Context, kubeconfigRef *gatewayv1alpha1.K } // extractClientCertAuth handles client certificate authentication from ClientCertificateRef -func extractClientCertAuth(ctx context.Context, certRef *gatewayv1alpha1.ClientCertificateRef, k8sClient client.Client) (map[string]interface{}, error) { - secret, err := getSecret(ctx, certRef.Name, certRef.Namespace, k8sClient) +func (m *MetadataInjector) extractClientCertAuth(ctx context.Context, certRef *gatewayv1alpha1.ClientCertificateRef) (map[string]interface{}, error) { + secret, err := m.getSecret(ctx, certRef.Name, certRef.Namespace) if err != nil { return nil, fmt.Errorf("failed to get client certificate secret: %w", err) } @@ -197,13 +211,13 @@ func extractClientCertAuth(ctx context.Context, certRef *gatewayv1alpha1.ClientC } // getSecret is a helper function to retrieve secrets with namespace defaulting -func getSecret(ctx context.Context, name, namespace string, k8sClient client.Client) (*corev1.Secret, error) { +func (m *MetadataInjector) getSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) { if namespace == "" { namespace = "default" } secret := &corev1.Secret{} - err := k8sClient.Get(ctx, types.NamespacedName{ + err := m.client.Get(ctx, types.NamespacedName{ Name: name, Namespace: namespace, }, secret) @@ -215,11 +229,11 @@ func getSecret(ctx context.Context, name, namespace string, k8sClient client.Cli } // extractKubeconfigFromEnv gets kubeconfig data from the same sources as ctrl.GetConfig() -func extractKubeconfigFromEnv(log *logger.Logger) ([]byte, string, error) { +func (m *MetadataInjector) extractKubeconfigFromEnv() ([]byte, string, error) { // Check KUBECONFIG environment variable first kubeconfigPath := os.Getenv("KUBECONFIG") if kubeconfigPath != "" { - 
log.Debug().Str("source", "KUBECONFIG env var").Str("path", kubeconfigPath).Msg("using kubeconfig from environment variable") + m.log.Debug().Str("source", "KUBECONFIG env var").Str("path", kubeconfigPath).Msg("using kubeconfig from environment variable") } // Fall back to default kubeconfig location if not set @@ -229,7 +243,7 @@ func extractKubeconfigFromEnv(log *logger.Logger) ([]byte, string, error) { return nil, "", fmt.Errorf("failed to determine kubeconfig location: %w", err) } kubeconfigPath = home + "/.kube/config" - log.Debug().Str("source", "default location").Str("path", kubeconfigPath).Msg("using default kubeconfig location") + m.log.Debug().Str("source", "default location").Str("path", kubeconfigPath).Msg("using default kubeconfig location") } // Check if file exists @@ -301,32 +315,32 @@ func stripVirtualWorkspacePath(hostURL string) string { } // extractCAFromKubeconfigData extracts CA certificate data from raw kubeconfig bytes -func extractCAFromKubeconfigData(kubeconfigData []byte, log *logger.Logger) []byte { +func (m *MetadataInjector) extractCAFromKubeconfigData(kubeconfigData []byte) []byte { config, err := clientcmd.Load(kubeconfigData) if err != nil { - log.Warn().Err(err).Msg("failed to parse kubeconfig for CA extraction") + m.log.Warn().Err(err).Msg("failed to parse kubeconfig for CA extraction") return nil } if config.CurrentContext == "" { - log.Warn().Msg("no current context in kubeconfig for CA extraction") + m.log.Warn().Msg("no current context in kubeconfig for CA extraction") return nil } context, exists := config.Contexts[config.CurrentContext] if !exists { - log.Warn().Str("context", config.CurrentContext).Msg("current context not found in kubeconfig for CA extraction") + m.log.Warn().Str("context", config.CurrentContext).Msg("current context not found in kubeconfig for CA extraction") return nil } cluster, exists := config.Clusters[context.Cluster] if !exists { - log.Warn().Str("cluster", context.Cluster).Msg("cluster not found 
in kubeconfig for CA extraction") + m.log.Warn().Str("cluster", context.Cluster).Msg("cluster not found in kubeconfig for CA extraction") return nil } if len(cluster.CertificateAuthorityData) == 0 { - log.Debug().Msg("no CA data found in kubeconfig") + m.log.Debug().Msg("no CA data found in kubeconfig") return nil } @@ -334,21 +348,21 @@ func extractCAFromKubeconfigData(kubeconfigData []byte, log *logger.Logger) []by } // extractCAFromKubeconfigB64 extracts CA certificate data from base64-encoded kubeconfig -func extractCAFromKubeconfigB64(kubeconfigB64 string, log *logger.Logger) []byte { +func (m *MetadataInjector) extractCAFromKubeconfigB64(kubeconfigB64 string) []byte { kubeconfigData, err := base64.StdEncoding.DecodeString(kubeconfigB64) if err != nil { - log.Warn().Err(err).Msg("failed to decode kubeconfig for CA extraction") + m.log.Warn().Err(err).Msg("failed to decode kubeconfig for CA extraction") return nil } - return extractCAFromKubeconfigData(kubeconfigData, log) + return m.extractCAFromKubeconfigData(kubeconfigData) } // tryExtractKubeconfigCA attempts to extract CA data from kubeconfig auth and adds it to metadata -func tryExtractKubeconfigCA(ctx context.Context, auth *gatewayv1alpha1.AuthConfig, k8sClient client.Client, metadata map[string]interface{}, log *logger.Logger) { - authMetadata, err := extractAuthDataForMetadata(ctx, auth, k8sClient) +func (m *MetadataInjector) tryExtractKubeconfigCA(ctx context.Context, auth *gatewayv1alpha1.AuthConfig, metadata map[string]interface{}) { + authMetadata, err := m.extractAuthDataForMetadata(ctx, auth) if err != nil { - log.Warn().Err(err).Msg("failed to extract auth data for CA extraction") + m.log.Warn().Err(err).Msg("failed to extract auth data for CA extraction") return } @@ -366,7 +380,7 @@ func tryExtractKubeconfigCA(ctx context.Context, auth *gatewayv1alpha1.AuthConfi return } - kubeconfigCAData := extractCAFromKubeconfigB64(kubeconfigB64, log) + kubeconfigCAData := 
m.extractCAFromKubeconfigB64(kubeconfigB64) if kubeconfigCAData == nil { return } @@ -374,13 +388,13 @@ func tryExtractKubeconfigCA(ctx context.Context, auth *gatewayv1alpha1.AuthConfi metadata["ca"] = map[string]interface{}{ "data": base64.StdEncoding.EncodeToString(kubeconfigCAData), } - log.Info().Msg("extracted CA data from kubeconfig") + m.log.Info().Msg("extracted CA data from kubeconfig") } // determineHost determines which host to use based on configuration -func determineHost(originalHost, hostOverride string, log *logger.Logger) string { +func (m *MetadataInjector) determineHost(originalHost, hostOverride string) string { if hostOverride != "" { - log.Info(). + m.log.Info(). Str("originalHost", originalHost). Str("overrideHost", hostOverride). Msg("using host override for virtual workspace") @@ -390,7 +404,7 @@ func determineHost(originalHost, hostOverride string, log *logger.Logger) string // For normal workspaces, ensure we use a clean host by stripping any virtual workspace paths cleanedHost := stripVirtualWorkspacePath(originalHost) if cleanedHost != originalHost { - log.Info(). + m.log.Info(). Str("originalHost", originalHost). Str("cleanedHost", cleanedHost). Msg("cleaned virtual workspace path from host for normal workspace") @@ -399,9 +413,9 @@ func determineHost(originalHost, hostOverride string, log *logger.Logger) string } // determineKCPHost determines which host to use for KCP metadata injection -func determineKCPHost(kubeconfigHost, override, clusterPath string, log *logger.Logger) string { +func (m *MetadataInjector) determineKCPHost(kubeconfigHost, override, clusterPath string) string { if override != "" { - log.Info(). + m.log.Info(). Str("clusterPath", clusterPath). Str("originalHost", kubeconfigHost). Str("overrideHost", override). @@ -412,7 +426,7 @@ func determineKCPHost(kubeconfigHost, override, clusterPath string, log *logger. 
// For normal workspaces, ensure we use a clean KCP host by stripping any virtual workspace paths host := stripVirtualWorkspacePath(kubeconfigHost) if host != kubeconfigHost { - log.Info(). + m.log.Info(). Str("clusterPath", clusterPath). Str("originalHost", kubeconfigHost). Str("cleanedHost", host). @@ -422,7 +436,7 @@ func determineKCPHost(kubeconfigHost, override, clusterPath string, log *logger. } // finalizeSchemaInjection finalizes the schema injection process -func finalizeSchemaInjection(schemaData map[string]interface{}, metadata map[string]interface{}, host, path string, hasCA bool, log *logger.Logger) ([]byte, error) { +func (m *MetadataInjector) finalizeSchemaInjection(schemaData map[string]interface{}, metadata map[string]interface{}, host, path string, hasCA bool) ([]byte, error) { // Inject the metadata into the schema schemaData["x-cluster-metadata"] = metadata @@ -432,7 +446,7 @@ func finalizeSchemaInjection(schemaData map[string]interface{}, metadata map[str return nil, fmt.Errorf("failed to marshal modified schema: %w", err) } - log.Info(). + m.log.Info(). Str("host", host). Str("path", path). Bool("hasCA", hasCA). 
@@ -440,3 +454,32 @@ func finalizeSchemaInjection(schemaData map[string]interface{}, metadata map[str return modifiedJSON, nil } + +// Legacy function wrappers for backward compatibility +// These can be removed after updating all callers + +// InjectClusterMetadata is a legacy wrapper for backward compatibility +func InjectClusterMetadata(ctx context.Context, schemaJSON []byte, config MetadataInjectionConfig, k8sClient client.Client, log *logger.Logger) ([]byte, error) { + injector := NewMetadataInjector(log, k8sClient) + return injector.InjectClusterMetadata(ctx, schemaJSON, config) +} + +// InjectKCPMetadataFromEnv is a legacy wrapper for backward compatibility +func InjectKCPMetadataFromEnv(schemaJSON []byte, clusterPath string, log *logger.Logger, hostOverride ...string) ([]byte, error) { + injector := NewMetadataInjector(log, nil) + return injector.InjectKCPMetadataFromEnv(schemaJSON, clusterPath, hostOverride...) +} + +// Test helpers for internal testing - these wrap internal methods for unit tests + +// extractKubeconfigFromEnv is an unexported wrapper kept for testing +func extractKubeconfigFromEnv(log *logger.Logger) ([]byte, string, error) { + injector := NewMetadataInjector(log, nil) + return injector.extractKubeconfigFromEnv() +} + +// extractAuthDataForMetadata is an unexported wrapper kept for testing +func extractAuthDataForMetadata(ctx context.Context, auth *gatewayv1alpha1.AuthConfig, k8sClient client.Client) (map[string]interface{}, error) { + injector := NewMetadataInjector(nil, k8sClient) + return injector.extractAuthDataForMetadata(ctx, auth) +} diff --git a/gateway/manager/manager_test.go b/gateway/manager/manager_test.go index 4ac7bb69..faa6655e 100644 --- a/gateway/manager/manager_test.go +++ b/gateway/manager/manager_test.go @@ -4,7 +4,7 @@ import ( "errors" "testing" - "github.com/openmfp/golang-commons/logger" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/gateway/manager/mocks" "github.com/stretchr/testify/assert" 
) @@ -18,10 +18,8 @@ func TestService_Close(t *testing.T) { { name: "both_services_nil", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: nil, schemaWatcher: nil, } @@ -31,15 +29,10 @@ func TestService_Close(t *testing.T) { { name: "cluster_registry_nil_schema_watcher_present", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - - mockSchema := mocks.NewMockSchemaWatcher(t) - return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: nil, - schemaWatcher: mockSchema, + schemaWatcher: mocks.NewMockSchemaWatcher(t), } }, expectError: false, @@ -47,14 +40,11 @@ func TestService_Close(t *testing.T) { { name: "schema_watcher_nil_cluster_registry_present", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - mockCluster := mocks.NewMockClusterManager(t) mockCluster.EXPECT().Close().Return(nil) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: mockCluster, schemaWatcher: nil, } @@ -64,16 +54,13 @@ func TestService_Close(t *testing.T) { { name: "both_services_present_successful_close", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - mockCluster := mocks.NewMockClusterManager(t) mockCluster.EXPECT().Close().Return(nil) mockSchema := mocks.NewMockSchemaWatcher(t) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: mockCluster, schemaWatcher: mockSchema, } @@ -83,16 +70,13 @@ func TestService_Close(t *testing.T) { { name: "schema_watcher_close_error_cluster_registry_succeeds", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - mockCluster := mocks.NewMockClusterManager(t) 
mockCluster.EXPECT().Close().Return(nil) mockSchema := mocks.NewMockSchemaWatcher(t) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: mockCluster, schemaWatcher: mockSchema, } @@ -102,16 +86,13 @@ func TestService_Close(t *testing.T) { { name: "cluster_registry_close_error_schema_watcher_succeeds", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - mockCluster := mocks.NewMockClusterManager(t) mockCluster.EXPECT().Close().Return(errors.New("cluster registry close error")) mockSchema := mocks.NewMockSchemaWatcher(t) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: mockCluster, schemaWatcher: mockSchema, } @@ -121,16 +102,13 @@ func TestService_Close(t *testing.T) { { name: "both_services_close_with_errors", setupService: func(t *testing.T) *Service { - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - mockCluster := mocks.NewMockClusterManager(t) mockCluster.EXPECT().Close().Return(errors.New("cluster registry close error")) mockSchema := mocks.NewMockSchemaWatcher(t) return &Service{ - log: log, + log: testlogger.New().Logger, clusterRegistry: mockCluster, schemaWatcher: mockSchema, } diff --git a/gateway/manager/roundtripper/roundtripper_test.go b/gateway/manager/roundtripper/roundtripper_test.go index c715d900..bce5604b 100644 --- a/gateway/manager/roundtripper/roundtripper_test.go +++ b/gateway/manager/roundtripper/roundtripper_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/golang-jwt/jwt/v5" - "github.com/openmfp/golang-commons/logger" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -71,16 +71,13 @@ func TestRoundTripper_RoundTrip(t *testing.T) { tt.setupMocks(mockAdmin, mockUnauthorized) - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := 
appConfig.Config{ LocalDevelopment: tt.localDevelopment, } appCfg.Gateway.ShouldImpersonate = tt.shouldImpersonate appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "http://example.com/api/v1/pods", nil) if tt.token != "" { @@ -259,16 +256,13 @@ func TestRoundTripper_DiscoveryRequests(t *testing.T) { mockUnauthorized.EXPECT().RoundTrip(mock.Anything).Return(&http.Response{StatusCode: http.StatusUnauthorized}, nil) } - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{ LocalDevelopment: false, } appCfg.Gateway.ShouldImpersonate = false appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(tt.method, "http://example.com"+tt.path, nil) @@ -376,16 +370,13 @@ func TestRoundTripper_ComprehensiveFunctionality(t *testing.T) { tt.setupMocks(mockAdmin, mockUnauthorized) - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{ LocalDevelopment: tt.localDevelopment, } appCfg.Gateway.ShouldImpersonate = tt.shouldImpersonate appCfg.Gateway.UsernameClaim = tt.usernameClaim - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "http://example.com/api/v1/pods", nil) if tt.token != "" { @@ -454,16 +445,13 @@ func TestRoundTripper_KCPDiscoveryRequests(t *testing.T) { mockUnauthorized.EXPECT().RoundTrip(mock.Anything).Return(&http.Response{StatusCode: http.StatusUnauthorized}, nil) } - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{ LocalDevelopment: 
false, } appCfg.Gateway.ShouldImpersonate = false appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "http://example.com"+tt.path, nil) @@ -508,14 +496,11 @@ func TestRoundTripper_InvalidTokenSecurityFix(t *testing.T) { // The unauthorizedRT should be called since we have no token mockUnauthorized.EXPECT().RoundTrip(mock.Anything).Return(&http.Response{StatusCode: http.StatusUnauthorized}, nil) - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{} appCfg.Gateway.ShouldImpersonate = false appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "/api/v1/pods", nil) // Don't set a token to simulate the invalid token case @@ -538,14 +523,11 @@ func TestRoundTripper_ExistingAuthHeadersAreCleanedBeforeTokenAuth(t *testing.T) capturedRequest = req }) - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{} appCfg.Gateway.ShouldImpersonate = false appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "/api/v1/pods", nil) @@ -581,14 +563,11 @@ func TestRoundTripper_ExistingAuthHeadersAreCleanedBeforeImpersonation(t *testin capturedRequest = req }) - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - appCfg := appConfig.Config{} appCfg.Gateway.ShouldImpersonate = true appCfg.Gateway.UsernameClaim = "sub" - rt := roundtripper.New(log, appCfg, mockAdmin, mockUnauthorized) + rt := roundtripper.New(testlogger.New().Logger, 
appCfg, mockAdmin, mockUnauthorized) req := httptest.NewRequest(http.MethodGet, "/api/v1/pods", nil) diff --git a/gateway/manager/targetcluster/cluster_test.go b/gateway/manager/targetcluster/cluster_test.go index ff83990a..8c7e801d 100644 --- a/gateway/manager/targetcluster/cluster_test.go +++ b/gateway/manager/targetcluster/cluster_test.go @@ -4,18 +4,14 @@ import ( "encoding/base64" "testing" - "github.com/openmfp/golang-commons/logger" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "k8s.io/client-go/rest" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/gateway/manager/targetcluster" ) func TestBuildConfigFromMetadata(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - // Valid base64 encoded test data validCA := base64.StdEncoding.EncodeToString([]byte("-----BEGIN CERTIFICATE-----\nMIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADA=\n-----END CERTIFICATE-----")) validToken := base64.StdEncoding.EncodeToString([]byte("test-token-123")) @@ -341,7 +337,7 @@ users: for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config, err := targetcluster.BuildConfigFromMetadata(tt.metadata, log) + config, err := targetcluster.BuildConfigFromMetadata(tt.metadata, testlogger.New().Logger) if tt.expectError { assert.Error(t, err) diff --git a/gateway/manager/targetcluster/graphql_test.go b/gateway/manager/targetcluster/graphql_test.go index 68e92f91..978e186a 100644 --- a/gateway/manager/targetcluster/graphql_test.go +++ b/gateway/manager/targetcluster/graphql_test.go @@ -13,7 +13,7 @@ import ( "github.com/kcp-dev/logicalcluster/v3" "sigs.k8s.io/controller-runtime/pkg/kontext" - "github.com/openmfp/golang-commons/logger" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/common" appConfig "github.com/openmfp/kubernetes-graphql-gateway/common/config" 
"github.com/openmfp/kubernetes-graphql-gateway/gateway/manager/roundtripper" @@ -110,13 +110,9 @@ func TestIsIntrospectionQuery(t *testing.T) { } func TestNewGraphQLServer(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - t.Fatalf("failed to create logger: %v", err) - } appCfg := appConfig.Config{} - server := targetcluster.NewGraphQLServer(log, appCfg) + server := targetcluster.NewGraphQLServer(testlogger.New().Logger, appCfg) if server == nil { t.Error("expected non-nil server") @@ -124,16 +120,12 @@ func TestNewGraphQLServer(t *testing.T) { } func TestCreateHandler(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - t.Fatalf("failed to create logger: %v", err) - } appCfg := appConfig.Config{} appCfg.Gateway.HandlerCfg.Pretty = true appCfg.Gateway.HandlerCfg.Playground = false appCfg.Gateway.HandlerCfg.GraphiQL = true - server := targetcluster.NewGraphQLServer(log, appCfg) + server := targetcluster.NewGraphQLServer(testlogger.New().Logger, appCfg) // Create a simple test schema schema, err := graphql.NewSchema(graphql.SchemaConfig{ @@ -215,12 +207,8 @@ func TestSetContexts(t *testing.T) { } func TestHandleSubscription_ErrorCases(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - t.Fatalf("failed to create logger: %v", err) - } appCfg := appConfig.Config{} - server := targetcluster.NewGraphQLServer(log, appCfg) + server := targetcluster.NewGraphQLServer(testlogger.New().Logger, appCfg) // Create a simple test schema schema, err := graphql.NewSchema(graphql.SchemaConfig{ @@ -274,12 +262,8 @@ func TestHandleSubscription_ErrorCases(t *testing.T) { } func TestHandleSubscription_Headers(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - t.Fatalf("failed to create logger: %v", err) - } appCfg := appConfig.Config{} - server := targetcluster.NewGraphQLServer(log, appCfg) + server := targetcluster.NewGraphQLServer(testlogger.New().Logger, 
appCfg) // Create a simple test schema schema, err := graphql.NewSchema(graphql.SchemaConfig{ @@ -338,12 +322,8 @@ func TestHandleSubscription_Headers(t *testing.T) { } func TestHandleSubscription_SubscriptionLoop(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - t.Fatalf("failed to create logger: %v", err) - } appCfg := appConfig.Config{} - server := targetcluster.NewGraphQLServer(log, appCfg) + server := targetcluster.NewGraphQLServer(testlogger.New().Logger, appCfg) // Create schema with subscription that returns data schema, err := graphql.NewSchema(graphql.SchemaConfig{ diff --git a/gateway/resolver/resolver_test.go b/gateway/resolver/resolver_test.go index d2197354..04c6a8c5 100644 --- a/gateway/resolver/resolver_test.go +++ b/gateway/resolver/resolver_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/graphql-go/graphql" - "github.com/openmfp/golang-commons/logger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -15,16 +14,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/common/mocks" "github.com/openmfp/kubernetes-graphql-gateway/gateway/resolver" ) -func getResolver(runtimeClientMock client.WithWatch) (*resolver.Service, error) { - log, err := logger.New(logger.DefaultConfig()) - - return resolver.New(log, runtimeClientMock), err -} - func TestListItems(t *testing.T) { tests := []struct { name string @@ -85,9 +79,7 @@ func TestListItems(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := getResolver(runtimeClientMock) - require.NoError(t, err) - + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.ListItems(schema.GroupVersionKind{ Group: "group", Version: "version", @@ -176,8 +168,7 @@ func TestGetItem(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := 
getResolver(runtimeClientMock) - require.NoError(t, err) + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.GetItem(schema.GroupVersionKind{ Group: "group", @@ -247,8 +238,7 @@ func TestGetItemAsYAML(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := getResolver(runtimeClientMock) - require.NoError(t, err) + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.GetItemAsYAML(schema.GroupVersionKind{ Group: "group", @@ -381,8 +371,7 @@ func TestCreateItem(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := getResolver(runtimeClientMock) - require.NoError(t, err) + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.CreateItem(schema.GroupVersionKind{ Group: "group", @@ -564,8 +553,7 @@ func TestUpdateItem(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := getResolver(runtimeClientMock) - require.NoError(t, err) + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.UpdateItem(schema.GroupVersionKind{ Group: "group", @@ -649,8 +637,7 @@ func TestDeleteItem(t *testing.T) { tt.mockSetup(runtimeClientMock) } - r, err := getResolver(runtimeClientMock) - require.NoError(t, err) + r := resolver.New(testlogger.New().Logger, runtimeClientMock) result, err := r.DeleteItem(schema.GroupVersionKind{ Group: "group", diff --git a/go.mod b/go.mod index e8fcbe47..1423f689 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( github.com/stretchr/testify v1.10.0 go.opentelemetry.io/otel v1.37.0 go.opentelemetry.io/otel/trace v1.37.0 + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.2 k8s.io/apiextensions-apiserver v0.32.4 @@ -116,7 +117,6 @@ require ( go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.3 // indirect golang.org/x/crypto v0.39.0 // indirect - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // 
indirect golang.org/x/sync v0.15.0 // indirect diff --git a/listener/pkg/apischema/builder.go b/listener/pkg/apischema/builder.go index 1772b56b..81ef41df 100644 --- a/listener/pkg/apischema/builder.go +++ b/listener/pkg/apischema/builder.go @@ -4,22 +4,21 @@ import ( "encoding/json" "errors" "fmt" - "maps" "slices" "strings" - "k8s.io/apimachinery/pkg/api/meta" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "github.com/hashicorp/go-multierror" - "github.com/openmfp/golang-commons/logger" - "github.com/openmfp/kubernetes-graphql-gateway/common" + "golang.org/x/exp/maps" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sschema "k8s.io/apimachinery/pkg/runtime/schema" runtimeSchema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/openapi" "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/openmfp/golang-commons/logger" + "github.com/openmfp/kubernetes-graphql-gateway/common" ) var ( @@ -33,18 +32,13 @@ var ( ErrUnmarshalGVK = errors.New("failed to unmarshal GVK extension") ) -// SchemaBuilder helps construct GraphQL field config arguments type SchemaBuilder struct { schemas map[string]*spec.Schema err *multierror.Error log *logger.Logger } -func NewSchemaBuilder(oc openapi.Client, preferredApiGroups []string) *SchemaBuilder { - return NewSchemaBuilderWithLogger(oc, preferredApiGroups, nil) -} - -func NewSchemaBuilderWithLogger(oc openapi.Client, preferredApiGroups []string, log *logger.Logger) *SchemaBuilder { +func NewSchemaBuilder(oc openapi.Client, preferredApiGroups []string, log *logger.Logger) *SchemaBuilder { b := &SchemaBuilder{ schemas: make(map[string]*spec.Schema), log: log, @@ -59,9 +53,7 @@ func NewSchemaBuilderWithLogger(oc openapi.Client, preferredApiGroups []string, for path, gv := range apiv3Paths { schema, err := getSchemaForPath(preferredApiGroups, path, gv) 
if err != nil { - if b.log != nil { - b.log.Debug().Err(err).Str("path", path).Msg("skipping schema path") - } + b.log.Debug().Err(err).Str("path", path).Msg("skipping schema path") continue } maps.Copy(b.schemas, schema) @@ -100,26 +92,22 @@ func (b *SchemaBuilder) WithScope(rm meta.RESTMapper) *SchemaBuilder { } if len(gvks) != 1 { - if b.log != nil { - b.log.Debug().Int("gvkCount", len(gvks)).Msg("skipping schema with unexpected GVK count") - } + b.log.Debug().Int("gvkCount", len(gvks)).Msg("skipping schema with unexpected GVK count") continue } - namespaced, err := apiutil.IsGVKNamespaced(k8sschema.GroupVersionKind{ + namespaced, err := apiutil.IsGVKNamespaced(runtimeSchema.GroupVersionKind{ Group: gvks[0].Group, Version: gvks[0].Version, Kind: gvks[0].Kind, }, rm) if err != nil { - if b.log != nil { - b.log.Debug().Err(err). - Str("group", gvks[0].Group). - Str("version", gvks[0].Version). - Str("kind", gvks[0].Kind). - Msg("failed to determine if GVK is namespaced") - } + b.log.Debug().Err(err). + Str("group", gvks[0].Group). + Str("version", gvks[0].Version). + Str("kind", gvks[0].Kind). 
+ Msg("failed to determine if GVK is namespaced") continue } diff --git a/listener/pkg/apischema/builder_test.go b/listener/pkg/apischema/builder_test.go index ef04bd90..6e18a7f9 100644 --- a/listener/pkg/apischema/builder_test.go +++ b/listener/pkg/apischema/builder_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/common" apischema "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema" apischemaMocks "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema/mocks" @@ -134,7 +135,7 @@ func TestNewSchemaBuilder(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - b := apischema.NewSchemaBuilder(tc.client, []string{"v1"}) + b := apischema.NewSchemaBuilder(tc.client, []string{"v1"}, testlogger.New().Logger) if tc.wantErr != nil { assert.NotNil(t, b.GetError(), "expected error, got nil") assert.Equal(t, 0, len(b.GetSchemas()), "expected 0 schemas on error") @@ -193,7 +194,7 @@ func TestWithCRDCategories(t *testing.T) { t.Run(tc.name, func(t *testing.T) { mock := apischemaMocks.NewMockClient(t) mock.EXPECT().Paths().Return(map[string]openapi.GroupVersion{}, nil) - b := apischema.NewSchemaBuilder(mock, nil) + b := apischema.NewSchemaBuilder(mock, nil, testlogger.New().Logger) b.SetSchemas(map[string]*spec.Schema{ tc.key: {VendorExtensible: spec.VendorExtensible{Extensions: map[string]interface{}{}}}, }) @@ -245,7 +246,7 @@ func TestWithApiResourceCategories(t *testing.T) { t.Run(tc.name, func(t *testing.T) { mock := apischemaMocks.NewMockClient(t) mock.EXPECT().Paths().Return(map[string]openapi.GroupVersion{}, nil) - b := apischema.NewSchemaBuilder(mock, nil) + b := apischema.NewSchemaBuilder(mock, nil, testlogger.New().Logger) b.SetSchemas(map[string]*spec.Schema{ tc.key: {VendorExtensible: spec.VendorExtensible{Extensions: map[string]interface{}{}}}, }) @@ -281,7 +282,7 @@ func TestWithScope(t *testing.T) { 
mock := apischemaMocks.NewMockClient(t) mock.EXPECT().Paths().Return(map[string]openapi.GroupVersion{}, nil) - b := apischema.NewSchemaBuilder(mock, nil) + b := apischema.NewSchemaBuilder(mock, nil, testlogger.New().Logger) b.SetSchemas(map[string]*spec.Schema{ "g.v1.K": s, }) diff --git a/listener/pkg/apischema/crd_resolver.go b/listener/pkg/apischema/crd_resolver.go index b3246a75..bae15b35 100644 --- a/listener/pkg/apischema/crd_resolver.go +++ b/listener/pkg/apischema/crd_resolver.go @@ -35,10 +35,20 @@ type GroupKindVersions struct { type CRDResolver struct { discovery.DiscoveryInterface meta.RESTMapper + log *logger.Logger +} + +// NewCRDResolver creates a new CRDResolver with proper logger setup +func NewCRDResolver(discovery discovery.DiscoveryInterface, restMapper meta.RESTMapper, log *logger.Logger) *CRDResolver { + return &CRDResolver{ + DiscoveryInterface: discovery, + RESTMapper: restMapper, + log: log, + } } func (cr *CRDResolver) Resolve(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, error) { - return resolveSchema(dc, rm) + return cr.resolveSchema(dc, rm) } func (cr *CRDResolver) ResolveApiSchema(crd *apiextensionsv1.CustomResourceDefinition) ([]byte, error) { @@ -46,41 +56,93 @@ func (cr *CRDResolver) ResolveApiSchema(crd *apiextensionsv1.CustomResourceDefin apiResLists, err := cr.ServerPreferredResources() if err != nil { + cr.log.Error().Err(err). + Str("crdName", crd.Name). + Str("group", gkv.Group). + Str("kind", gkv.Kind). + Msg("failed to get server preferred resources") return nil, errors.Join(ErrGetServerPreferred, err) } - preferredApiGroups, err := errorIfCRDNotInPreferredApiGroups(gkv, apiResLists, nil) + preferredApiGroups, err := cr.errorIfCRDNotInPreferredApiGroups(gkv, apiResLists) if err != nil { + cr.log.Error().Err(err). + Str("crdName", crd.Name). + Str("group", gkv.Group). + Str("kind", gkv.Kind). 
+ Msg("failed to filter preferred resources") return nil, errors.Join(ErrFilterPreferredResources, err) } - return NewSchemaBuilder(cr.OpenAPIV3(), preferredApiGroups). + result, err := NewSchemaBuilder(cr.OpenAPIV3(), preferredApiGroups, cr.log). WithScope(cr.RESTMapper). WithCRDCategories(crd). Complete() + + if err != nil { + cr.log.Error().Err(err). + Str("crdName", crd.Name). + Int("preferredApiGroupsCount", len(preferredApiGroups)). + Msg("failed to complete schema building") + return nil, err + } + + cr.log.Debug(). + Str("crdName", crd.Name). + Str("group", gkv.Group). + Str("kind", gkv.Kind). + Msg("successfully resolved API schema") + + return result, nil } -func errorIfCRDNotInPreferredApiGroups(gkv *GroupKindVersions, apiResLists []*metav1.APIResourceList, log *logger.Logger) ([]string, error) { +func (cr *CRDResolver) errorIfCRDNotInPreferredApiGroups(gkv *GroupKindVersions, apiResLists []*metav1.APIResourceList) ([]string, error) { isKindFound := false preferredApiGroups := make([]string, 0, len(apiResLists)) + for _, apiResources := range apiResLists { gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) if err != nil { - if log != nil { - log.Error().Err(err).Str("groupVersion", apiResources.GroupVersion).Msg("failed to parse group version") - } + cr.log.Error().Err(err). + Str("groupVersion", apiResources.GroupVersion). + Str("targetGroup", gkv.Group). + Str("targetKind", gkv.Kind). + Msg("failed to parse group version") continue } + isGroupFound := gkv.Group == gv.Group isVersionFound := slices.Contains(gkv.Versions, gv.Version) + if isGroupFound && isVersionFound && !isKindFound { isKindFound = isCRDKindIncluded(gkv, apiResources) + cr.log.Debug(). + Str("groupVersion", apiResources.GroupVersion). + Str("targetGroup", gkv.Group). + Str("targetKind", gkv.Kind). + Bool("kindFound", isKindFound). 
+ Msg("checking if CRD kind is included in preferred APIs") } + preferredApiGroups = append(preferredApiGroups, apiResources.GroupVersion) } + if !isKindFound { + cr.log.Warn(). + Str("group", gkv.Group). + Str("kind", gkv.Kind). + Strs("versions", gkv.Versions). + Int("checkedApiGroups", len(preferredApiGroups)). + Msg("CRD kind not found in preferred API resources") return nil, ErrGVKNotPreferred } + + cr.log.Debug(). + Str("group", gkv.Group). + Str("kind", gkv.Kind). + Int("preferredApiGroupsCount", len(preferredApiGroups)). + Msg("successfully found CRD in preferred API groups") + return preferredApiGroups, nil } @@ -130,9 +192,10 @@ func getSchemaForPath(preferredApiGroups []string, path string, gv openapi.Group return resp.Components.Schemas, nil } -func resolveSchema(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, error) { +func (cr *CRDResolver) resolveSchema(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, error) { apiResList, err := dc.ServerPreferredResources() if err != nil { + cr.log.Error().Err(err).Msg("failed to get server preferred resources") return nil, errors.Join(ErrGetServerPreferred, err) } @@ -141,8 +204,23 @@ func resolveSchema(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, preferredApiGroups = append(preferredApiGroups, apiRes.GroupVersion) } - return NewSchemaBuilder(dc.OpenAPIV3(), preferredApiGroups). + result, err := NewSchemaBuilder(dc.OpenAPIV3(), preferredApiGroups, cr.log). WithScope(rm). WithApiResourceCategories(apiResList). Complete() + + if err != nil { + cr.log.Error().Err(err). + Int("preferredApiGroupsCount", len(preferredApiGroups)). + Int("apiResourceListsCount", len(apiResList)). + Msg("failed to build schema") + return nil, err + } + + cr.log.Debug(). + Int("preferredApiGroupsCount", len(preferredApiGroups)). + Int("apiResourceListsCount", len(apiResList)). 
+ Msg("successfully resolved schema") + + return result, nil } diff --git a/listener/pkg/apischema/crd_resolver_test.go b/listener/pkg/apischema/crd_resolver_test.go index 9376d73b..11f7f4e4 100644 --- a/listener/pkg/apischema/crd_resolver_test.go +++ b/listener/pkg/apischema/crd_resolver_test.go @@ -9,6 +9,7 @@ import ( "k8s.io/client-go/openapi" "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/openmfp/golang-commons/logger/testlogger" apischema "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema" apischemaMocks "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema/mocks" "github.com/stretchr/testify/assert" @@ -317,7 +318,7 @@ func TestResolveSchema(t *testing.T) { dc.EXPECT().OpenAPIV3().Return(openAPIClient) } - got, err := apischema.ResolveSchema(dc, rm) + got, err := apischema.ResolveSchema(dc, rm, testlogger.New().Logger) if tc.wantErr != nil { assert.ErrorIs(t, err, tc.wantErr) return diff --git a/listener/pkg/apischema/export_test.go b/listener/pkg/apischema/export_test.go index 9fd43921..2df7897d 100644 --- a/listener/pkg/apischema/export_test.go +++ b/listener/pkg/apischema/export_test.go @@ -7,6 +7,9 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/openapi" "k8s.io/kube-openapi/pkg/validation/spec" + + "github.com/openmfp/golang-commons/logger" + "github.com/openmfp/golang-commons/logger/testlogger" ) func GetCRDGroupKindVersions(spec apiextensionsv1.CustomResourceDefinitionSpec) *GroupKindVersions { @@ -18,15 +21,17 @@ func IsCRDKindIncluded(gkv *GroupKindVersions, apiList *metav1.APIResourceList) } func ErrorIfCRDNotInPreferredApiGroups(gkv *GroupKindVersions, lists []*metav1.APIResourceList) ([]string, error) { - return errorIfCRDNotInPreferredApiGroups(gkv, lists, nil) + crdResolver := NewCRDResolver(nil, nil, testlogger.New().Logger) + return crdResolver.errorIfCRDNotInPreferredApiGroups(gkv, lists) } func GetSchemaForPath(preferred []string, path string, gv openapi.GroupVersion) 
(map[string]*spec.Schema, error) { return getSchemaForPath(preferred, path, gv) } -func ResolveSchema(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, error) { - return resolveSchema(dc, rm) +func ResolveSchema(dc discovery.DiscoveryInterface, rm meta.RESTMapper, log *logger.Logger) ([]byte, error) { + crdResolver := NewCRDResolver(dc, rm, log) + return crdResolver.resolveSchema(dc, rm) } func GetOpenAPISchemaKey(gvk metav1.GroupVersionKind) string { diff --git a/listener/pkg/apischema/resolver.go b/listener/pkg/apischema/resolver.go index b3bf2796..8bfd7347 100644 --- a/listener/pkg/apischema/resolver.go +++ b/listener/pkg/apischema/resolver.go @@ -4,6 +4,8 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/discovery" "k8s.io/kube-openapi/pkg/validation/spec" + + "github.com/openmfp/golang-commons/logger" ) const ( @@ -23,12 +25,14 @@ type Resolver interface { } type ResolverProvider struct { + log *logger.Logger } -func NewResolver() *ResolverProvider { - return &ResolverProvider{} +func NewResolver(log *logger.Logger) *ResolverProvider { + return &ResolverProvider{log: log} } func (r *ResolverProvider) Resolve(dc discovery.DiscoveryInterface, rm meta.RESTMapper) ([]byte, error) { - return resolveSchema(dc, rm) + crdResolver := NewCRDResolver(dc, rm, r.log) + return crdResolver.resolveSchema(dc, rm) } diff --git a/listener/pkg/apischema/resolver_test.go b/listener/pkg/apischema/resolver_test.go index 734deecd..77894aed 100644 --- a/listener/pkg/apischema/resolver_test.go +++ b/listener/pkg/apischema/resolver_test.go @@ -7,6 +7,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/openapi" + "github.com/openmfp/golang-commons/logger/testlogger" apischemaMocks "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema/mocks" ) @@ -16,7 +17,7 @@ var _ Resolver = (*ResolverProvider)(nil) // TestNewResolverNotNil checks if NewResolver() returns a non-nil *ResolverProvider // instance. 
This is a runtime check to ensure that the function behaves as expected. func TestNewResolverNotNil(t *testing.T) { - r := NewResolver() + r := NewResolver(testlogger.New().Logger) assert.NotNil(t, r, "NewResolver() should return non-nil *ResolverProvider") } @@ -61,7 +62,7 @@ func TestResolverProvider_Resolve(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resolver := NewResolver() + resolver := NewResolver(testlogger.New().Logger) dc := apischemaMocks.NewMockDiscoveryInterface(t) rm := apischemaMocks.NewMockRESTMapper(t) diff --git a/listener/reconciler/clusteraccess/config_builder_test.go b/listener/reconciler/clusteraccess/config_builder_test.go index fd3cf91f..d06c043e 100644 --- a/listener/reconciler/clusteraccess/config_builder_test.go +++ b/listener/reconciler/clusteraccess/config_builder_test.go @@ -14,6 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" gatewayv1alpha1 "github.com/openmfp/kubernetes-graphql-gateway/common/apis/v1alpha1" + "github.com/openmfp/kubernetes-graphql-gateway/common/auth" "github.com/openmfp/kubernetes-graphql-gateway/common/mocks" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler/clusteraccess" ) @@ -304,7 +305,7 @@ func TestExtractCAData(t *testing.T) { mockClient := mocks.NewMockClient(t) tt.mockSetup(mockClient) - got, err := clusteraccess.ExtractCAData(t.Context(), tt.ca, mockClient) + got, err := auth.ExtractCAData(t.Context(), tt.ca, mockClient) if tt.wantErr { assert.Error(t, err) diff --git a/listener/reconciler/clusteraccess/export_test.go b/listener/reconciler/clusteraccess/export_test.go index 6330305d..2485d387 100644 --- a/listener/reconciler/clusteraccess/export_test.go +++ b/listener/reconciler/clusteraccess/export_test.go @@ -3,58 +3,13 @@ package clusteraccess import ( "context" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/openmfp/golang-commons/logger" gatewayv1alpha1 
"github.com/openmfp/kubernetes-graphql-gateway/common/apis/v1alpha1" - "github.com/openmfp/kubernetes-graphql-gateway/common/auth" ) -// Exported functions for testing private functions - -// Config builder exports -// ExtractCAData exposes the common auth ExtractCAData function for testing -func ExtractCAData(ctx context.Context, ca *gatewayv1alpha1.CAConfig, k8sClient client.Client) ([]byte, error) { - return auth.ExtractCAData(ctx, ca, k8sClient) -} - -// ConfigureAuthentication exposes the common auth ConfigureAuthentication function for testing -func ConfigureAuthentication(ctx context.Context, config *rest.Config, authConfig *gatewayv1alpha1.AuthConfig, k8sClient client.Client) error { - return auth.ConfigureAuthentication(ctx, config, authConfig, k8sClient) -} - -func ExtractAuthFromKubeconfig(config *rest.Config, authInfo *api.AuthInfo) error { - return auth.ExtractAuthFromKubeconfig(config, authInfo) -} - // Metadata injector exports - now all delegated to common auth package func InjectClusterMetadata(ctx context.Context, schemaJSON []byte, clusterAccess gatewayv1alpha1.ClusterAccess, k8sClient client.Client, log *logger.Logger) ([]byte, error) { return injectClusterMetadata(ctx, schemaJSON, clusterAccess, k8sClient, log) } - -// The following functions are now part of the common auth package -// and can be accessed directly from there for testing if needed - -// Subroutines exports -type GenerateSchemaSubroutine = generateSchemaSubroutine - -func NewGenerateSchemaSubroutine(reconciler *ExportedClusterAccessReconciler) *GenerateSchemaSubroutine { - return &generateSchemaSubroutine{reconciler: reconciler} -} - -// Type and constant exports -type ExportedCRDStatus = CRDStatus -type ExportedClusterAccessReconciler = ClusterAccessReconciler - -const ( - ExportedCRDNotRegistered = CRDNotRegistered - ExportedCRDRegistered = CRDRegistered -) - -// Error exports -var ( - ExportedErrCRDNotRegistered = ErrCRDNotRegistered - ExportedErrCRDCheckFailed = 
ErrCRDCheckFailed -) diff --git a/listener/reconciler/clusteraccess/export_test_integration.go b/listener/reconciler/clusteraccess/export_test_integration.go index 240d7826..bfe1428e 100644 --- a/listener/reconciler/clusteraccess/export_test_integration.go +++ b/listener/reconciler/clusteraccess/export_test_integration.go @@ -1,15 +1,8 @@ package clusteraccess -// Integration testing exports for cross-package access -// Unit tests within this package should use export_test.go instead +// This file exports internal functions for integration testing -// ClusterAccessReconcilerPublic exposes the reconciler for integration testing -type ClusterAccessReconcilerPublic = ClusterAccessReconciler - -// GenerateSchemaSubroutinePublic exposes the subroutine for integration testing -type GenerateSchemaSubroutinePublic = generateSchemaSubroutine - -// NewGenerateSchemaSubroutineForTesting creates a new subroutine for integration testing -func NewGenerateSchemaSubroutineForTesting(reconciler *ClusterAccessReconciler) *GenerateSchemaSubroutinePublic { +// NewGenerateSchemaSubroutineForTesting creates a generateSchemaSubroutine for testing +func NewGenerateSchemaSubroutineForTesting(reconciler *ClusterAccessReconciler) *generateSchemaSubroutine { return &generateSchemaSubroutine{reconciler: reconciler} } diff --git a/listener/reconciler/clusteraccess/metadata_injector_test.go b/listener/reconciler/clusteraccess/metadata_injector_test.go index f02aa1b8..58b4e135 100644 --- a/listener/reconciler/clusteraccess/metadata_injector_test.go +++ b/listener/reconciler/clusteraccess/metadata_injector_test.go @@ -253,7 +253,8 @@ func TestInjectClusterMetadata(t *testing.T) { } func TestInjectClusterMetadata_PathLogic(t *testing.T) { - mockLogger, _ := logger.New(logger.DefaultConfig()) + mockLogger, err := logger.New(logger.DefaultConfig()) + require.NoError(t, err) mockClient := mocks.NewMockClient(t) schemaJSON := []byte(`{"openapi": "3.0.0", "info": {"title": "Test"}}`) diff --git 
a/listener/reconciler/clusteraccess/reconciler.go b/listener/reconciler/clusteraccess/reconciler.go index 59acde80..44cdb102 100644 --- a/listener/reconciler/clusteraccess/reconciler.go +++ b/listener/reconciler/clusteraccess/reconciler.go @@ -19,13 +19,11 @@ import ( "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler" ) -// Package-specific errors var ( ErrCRDNotRegistered = errors.New("ClusterAccess CRD not registered") ErrCRDCheckFailed = errors.New("failed to check ClusterAccess CRD status") ) -// CRDStatus represents the status of ClusterAccess CRD type CRDStatus int const ( @@ -33,38 +31,31 @@ const ( CRDRegistered ) -// CreateMultiClusterReconciler creates a multi-cluster reconciler using ClusterAccess CRDs -func CreateMultiClusterReconciler( +func NewClusterAccessReconciler( + ctx context.Context, appCfg config.Config, opts reconciler.ReconcilerOpts, + ioHandler workspacefile.IOHandler, + schemaResolver apischema.Resolver, log *logger.Logger, ) (reconciler.CustomReconciler, error) { - log.Info().Msg("Using multi-cluster reconciler") - - // Check if ClusterAccess CRD is available - caStatus, err := CheckClusterAccessCRDStatus(context.Background(), opts.Client, log) - if err != nil { - if errors.Is(err, ErrCRDNotRegistered) { - log.Error().Msg("Multi-cluster mode enabled but ClusterAccess CRD not registered") - return nil, errors.New("multi-cluster mode enabled but ClusterAccess CRD not registered") - } - log.Error().Err(err).Msg("Multi-cluster mode enabled but failed to check ClusterAccess CRD status") - return nil, err + // Validate required dependencies + if ioHandler == nil { + return nil, fmt.Errorf("ioHandler is required") } - - if caStatus != CRDRegistered { - log.Error().Msg("Multi-cluster mode enabled but ClusterAccess CRD not available") - return nil, errors.New("multi-cluster mode enabled but ClusterAccess CRD not available") + if schemaResolver == nil { + return nil, fmt.Errorf("schemaResolver is required") } - // Create IO handler 
- ioHandler, err := workspacefile.NewIOHandler(appCfg.OpenApiDefinitionsPath) + // Check if ClusterAccess CRD is registered + crdStatus, err := CheckClusterAccessCRDStatus(ctx, opts.Client, log) if err != nil { - return nil, errors.Join(reconciler.ErrCreateIOHandler, err) + return nil, fmt.Errorf("failed to check ClusterAccess CRD status: %w", err) } - // Create schema resolver - schemaResolver := apischema.NewResolver() + if crdStatus != CRDRegistered { + return nil, ErrCRDNotRegistered + } log.Info().Msg("ClusterAccess CRD registered, creating ClusterAccess reconciler") return NewReconciler(opts, ioHandler, schemaResolver, log) @@ -90,13 +81,13 @@ func CheckClusterAccessCRDStatus(ctx context.Context, k8sClient client.Client, l // ClusterAccessReconciler handles reconciliation for ClusterAccess resources type ClusterAccessReconciler struct { - lifecycleManager *lifecycle.LifecycleManager - opts reconciler.ReconcilerOpts restCfg *rest.Config - mgr ctrl.Manager ioHandler workspacefile.IOHandler schemaResolver apischema.Resolver log *logger.Logger + mgr ctrl.Manager + opts reconciler.ReconcilerOpts + lifecycleManager *lifecycle.LifecycleManager } func NewReconciler( diff --git a/listener/reconciler/clusteraccess/reconciler_test.go b/listener/reconciler/clusteraccess/reconciler_test.go index 74fbd9e8..5342ba7f 100644 --- a/listener/reconciler/clusteraccess/reconciler_test.go +++ b/listener/reconciler/clusteraccess/reconciler_test.go @@ -10,15 +10,19 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/openmfp/golang-commons/logger" gatewayv1alpha1 "github.com/openmfp/kubernetes-graphql-gateway/common/apis/v1alpha1" "github.com/openmfp/kubernetes-graphql-gateway/common/config" "github.com/openmfp/kubernetes-graphql-gateway/common/mocks" + apischema_mocks 
"github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema/mocks" + workspacefile_mocks "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/workspacefile/mocks" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler" "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler/clusteraccess" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" ) func TestCheckClusterAccessCRDStatus(t *testing.T) { @@ -27,7 +31,7 @@ func TestCheckClusterAccessCRDStatus(t *testing.T) { tests := []struct { name string mockSetup func(*mocks.MockClient) - want clusteraccess.ExportedCRDStatus + want clusteraccess.CRDStatus wantErr bool }{ { @@ -47,7 +51,7 @@ func TestCheckClusterAccessCRDStatus(t *testing.T) { return nil }).Once() }, - want: clusteraccess.ExportedCRDRegistered, + want: clusteraccess.CRDRegistered, wantErr: false, }, @@ -63,7 +67,7 @@ func TestCheckClusterAccessCRDStatus(t *testing.T) { }, }).Once() }, - want: clusteraccess.ExportedCRDNotRegistered, + want: clusteraccess.CRDNotRegistered, wantErr: false, }, { @@ -72,7 +76,7 @@ func TestCheckClusterAccessCRDStatus(t *testing.T) { m.EXPECT().List(mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")). 
Return(errors.New("API server connection failed")).Once() }, - want: clusteraccess.ExportedCRDNotRegistered, + want: clusteraccess.CRDNotRegistered, wantErr: false, }, } @@ -82,86 +86,76 @@ func TestCheckClusterAccessCRDStatus(t *testing.T) { mockClient := mocks.NewMockClient(t) tt.mockSetup(mockClient) - got, err := clusteraccess.CheckClusterAccessCRDStatus(t.Context(), mockClient, mockLogger) + crdStatus, err := clusteraccess.CheckClusterAccessCRDStatus(t.Context(), mockClient, mockLogger) _ = err - assert.Equal(t, tt.want, got) + assert.Equal(t, tt.want, crdStatus) }) } } -func TestCreateMultiClusterReconciler(t *testing.T) { +func TestNewClusterAccessReconciler(t *testing.T) { mockLogger, _ := logger.New(logger.DefaultConfig()) tests := []struct { name string - mockSetup func(*mocks.MockClient) + setupMocks func() *mocks.MockClient wantErr bool errContains string }{ { - name: "successful_creation_with_clusteraccess_available", - mockSetup: func(m *mocks.MockClient) { - m.EXPECT().List(mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")). - RunAndReturn(func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - clusterAccessList := list.(*gatewayv1alpha1.ClusterAccessList) - clusterAccessList.Items = []gatewayv1alpha1.ClusterAccess{ - { - ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, - Spec: gatewayv1alpha1.ClusterAccessSpec{ - Host: "https://test.example.com", - }, - }, - } - return nil - }).Once() + name: "success_with_registered_crd", + setupMocks: func() *mocks.MockClient { + mockClient := &mocks.MockClient{} + mockClient.On("List", mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")).Return(nil) + return mockClient }, wantErr: false, }, { - name: "error_when_CRD_not_registered", - mockSetup: func(m *mocks.MockClient) { - m.EXPECT().List(mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")). 
- Return(&meta.NoResourceMatchError{ - PartialResource: schema.GroupVersionResource{ - Group: "gateway.openmfp.org", - Version: "v1alpha1", - Resource: "clusteraccesses", - }, - }).Once() - }, - wantErr: true, - errContains: "multi-cluster mode enabled but ClusterAccess CRD not registered", - }, - { - name: "error_when_CRD_check_fails", - mockSetup: func(m *mocks.MockClient) { - m.EXPECT().List(mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")). - Return(errors.New("API server connection failed")).Once() + name: "error_crd_not_registered", + setupMocks: func() *mocks.MockClient { + mockClient := &mocks.MockClient{} + noMatchErr := &meta.NoResourceMatchError{PartialResource: schema.GroupVersionResource{}} + mockClient.On("List", mock.Anything, mock.AnythingOfType("*v1alpha1.ClusterAccessList")).Return(noMatchErr) + return mockClient }, wantErr: true, - errContains: "failed to check ClusterAccess CRD status", + errContains: "ClusterAccess CRD not registered", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockClient := mocks.NewMockClient(t) - tt.mockSetup(mockClient) + mockClient := tt.setupMocks() - // Create temporary directory for OpenApiDefinitionsPath - tempDir := t.TempDir() + // Test the actual NewClusterAccessReconciler function + ctx := context.Background() + appCfg := config.Config{ + OpenApiDefinitionsPath: "/tmp/test", + } opts := reconciler.ReconcilerOpts{ - Client: mockClient, - Config: &rest.Config{Host: "https://test.example.com"}, - OpenAPIDefinitionsPath: tempDir, + Config: &rest.Config{Host: "https://test-api-server.com"}, + Scheme: runtime.NewScheme(), + Client: mockClient, + ManagerOpts: ctrl.Options{ + Scheme: runtime.NewScheme(), + }, + OpenAPIDefinitionsPath: "/tmp/test", } - testConfig := config.Config{ - OpenApiDefinitionsPath: tempDir, - } + // Create required dependencies using mocks + mockIOHandler := &workspacefile_mocks.MockIOHandler{} + mockSchemaResolver := &apischema_mocks.MockResolver{} - 
reconciler, err := clusteraccess.CreateMultiClusterReconciler(testConfig, opts, mockLogger) + reconciler, err := clusteraccess.NewClusterAccessReconciler( + ctx, + appCfg, + opts, + mockIOHandler, + mockSchemaResolver, + mockLogger, + ) if tt.wantErr { assert.Error(t, err) @@ -177,9 +171,60 @@ func TestCreateMultiClusterReconciler(t *testing.T) { } } +func TestNewClusterAccessReconciler_NilDependencyValidation(t *testing.T) { + mockLogger, _ := logger.New(logger.DefaultConfig()) + ctx := context.Background() + appCfg := config.Config{ + OpenApiDefinitionsPath: "/tmp/test", + } + opts := reconciler.ReconcilerOpts{ + Config: &rest.Config{Host: "https://test-api-server.com"}, + Scheme: runtime.NewScheme(), + Client: &mocks.MockClient{}, + ManagerOpts: ctrl.Options{ + Scheme: runtime.NewScheme(), + }, + OpenAPIDefinitionsPath: "/tmp/test", + } + + t.Run("nil_ioHandler", func(t *testing.T) { + mockSchemaResolver := &apischema_mocks.MockResolver{} + + reconciler, err := clusteraccess.NewClusterAccessReconciler( + ctx, + appCfg, + opts, + nil, // nil ioHandler + mockSchemaResolver, + mockLogger, + ) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "ioHandler is required") + assert.Nil(t, reconciler) + }) + + t.Run("nil_schemaResolver", func(t *testing.T) { + mockIOHandler := &workspacefile_mocks.MockIOHandler{} + + reconciler, err := clusteraccess.NewClusterAccessReconciler( + ctx, + appCfg, + opts, + mockIOHandler, + nil, // nil schemaResolver + mockLogger, + ) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "schemaResolver is required") + assert.Nil(t, reconciler) + }) +} + func TestConstants(t *testing.T) { t.Run("error_variables", func(t *testing.T) { - assert.Equal(t, "ClusterAccess CRD not registered", clusteraccess.ExportedErrCRDNotRegistered.Error()) - assert.Equal(t, "failed to check ClusterAccess CRD status", clusteraccess.ExportedErrCRDCheckFailed.Error()) + assert.Equal(t, "ClusterAccess CRD not registered", 
clusteraccess.ErrCRDNotRegistered.Error()) + assert.Equal(t, "failed to check ClusterAccess CRD status", clusteraccess.ErrCRDCheckFailed.Error()) }) } diff --git a/listener/reconciler/kcp/apibinding_controller.go b/listener/reconciler/kcp/apibinding_controller.go index 87dbb565..67f27127 100644 --- a/listener/reconciler/kcp/apibinding_controller.go +++ b/listener/reconciler/kcp/apibinding_controller.go @@ -23,7 +23,7 @@ import ( // APIBindingReconciler reconciles an APIBinding object type APIBindingReconciler struct { - client.Client + Client client.Client Scheme *runtime.Scheme RestConfig *rest.Config IOHandler workspacefile.IOHandler diff --git a/listener/reconciler/kcp/config_watcher_test.go b/listener/reconciler/kcp/config_watcher_test.go index d487177b..d7657b85 100644 --- a/listener/reconciler/kcp/config_watcher_test.go +++ b/listener/reconciler/kcp/config_watcher_test.go @@ -36,10 +36,9 @@ func TestNewConfigWatcher(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - log := testlogger.New().HideLogOutput().Logger virtualWSManager := &MockVirtualWorkspaceConfigManager{} - watcher, err := NewConfigWatcher(virtualWSManager, log) + watcher, err := NewConfigWatcher(virtualWSManager, testlogger.New().Logger) if tt.expectError { assert.Error(t, err) @@ -48,7 +47,7 @@ func TestNewConfigWatcher(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, watcher) assert.Equal(t, virtualWSManager, watcher.virtualWSManager) - assert.Equal(t, log, watcher.log) + assert.Equal(t, testlogger.New().Logger, watcher.log) assert.NotNil(t, watcher.fileWatcher) } }) @@ -86,12 +85,11 @@ func TestConfigWatcher_OnFileChanged(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - log := testlogger.New().HideLogOutput().Logger virtualWSManager := &MockVirtualWorkspaceConfigManager{ LoadConfigFunc: tt.loadConfigFunc, } - watcher, err := NewConfigWatcher(virtualWSManager, log) + watcher, err := NewConfigWatcher(virtualWSManager, 
testlogger.New().Logger) require.NoError(t, err) // Track change handler calls @@ -118,10 +116,9 @@ func TestConfigWatcher_OnFileChanged(t *testing.T) { } func TestConfigWatcher_OnFileDeleted(t *testing.T) { - log := testlogger.New().HideLogOutput().Logger virtualWSManager := &MockVirtualWorkspaceConfigManager{} - watcher, err := NewConfigWatcher(virtualWSManager, log) + watcher, err := NewConfigWatcher(virtualWSManager, testlogger.New().Logger) require.NoError(t, err) // Should not panic or error @@ -172,12 +169,11 @@ func TestConfigWatcher_LoadAndNotify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - log := testlogger.New().HideLogOutput().Logger virtualWSManager := &MockVirtualWorkspaceConfigManager{ LoadConfigFunc: tt.loadConfigFunc, } - watcher, err := NewConfigWatcher(virtualWSManager, log) + watcher, err := NewConfigWatcher(virtualWSManager, testlogger.New().Logger) require.NoError(t, err) // Track change handler calls @@ -213,14 +209,13 @@ func TestConfigWatcher_LoadAndNotify(t *testing.T) { } func TestConfigWatcher_Watch_EmptyPath(t *testing.T) { - log := testlogger.New().HideLogOutput().Logger virtualWSManager := &MockVirtualWorkspaceConfigManager{ LoadConfigFunc: func(configPath string) (*VirtualWorkspacesConfig, error) { return &VirtualWorkspacesConfig{}, nil }, } - watcher, err := NewConfigWatcher(virtualWSManager, log) + watcher, err := NewConfigWatcher(virtualWSManager, testlogger.New().Logger) require.NoError(t, err) ctx, cancel := context.WithTimeout(t.Context(), common.ShortTimeout) diff --git a/listener/reconciler/kcp/reconciler.go b/listener/reconciler/kcp/reconciler.go index cec87322..3e745d1a 100644 --- a/listener/reconciler/kcp/reconciler.go +++ b/listener/reconciler/kcp/reconciler.go @@ -16,12 +16,10 @@ import ( type KCPReconciler struct { mgr ctrl.Manager - log *logger.Logger + apiBindingReconciler *APIBindingReconciler virtualWorkspaceReconciler *VirtualWorkspaceReconciler configWatcher *ConfigWatcher - - 
// Components for controller setup (moved from constructor) - apiBindingReconciler *APIBindingReconciler + log *logger.Logger } func NewKCPReconciler( @@ -46,7 +44,7 @@ func NewKCPReconciler( } // Create schema resolver - schemaResolver := apischema.NewResolver() + schemaResolver := apischema.NewResolver(log) // Create cluster path resolver clusterPathResolver, err := NewClusterPathResolver(opts.Config, opts.Scheme) @@ -89,15 +87,16 @@ func NewKCPReconciler( return nil, err } - log.Info().Msg("Successfully configured KCP reconciler with workspace discovery") - - return &KCPReconciler{ + reconcilerInstance := &KCPReconciler{ mgr: mgr, - log: log, + apiBindingReconciler: apiBindingReconciler, virtualWorkspaceReconciler: virtualWorkspaceReconciler, configWatcher: configWatcher, - apiBindingReconciler: apiBindingReconciler, - }, nil + log: log, + } + + log.Info().Msg("Successfully configured KCP reconciler with workspace discovery") + return reconcilerInstance, nil } func (r *KCPReconciler) GetManager() ctrl.Manager { @@ -114,9 +113,6 @@ func (r *KCPReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R func (r *KCPReconciler) SetupWithManager(mgr ctrl.Manager) error { // Handle cases where the reconciler wasn't properly initialized (e.g., in tests) if r.apiBindingReconciler == nil { - if r.log != nil { - r.log.Debug().Msg("APIBinding reconciler not initialized, skipping controller setup") - } return nil } diff --git a/listener/reconciler/kcp/reconciler_test.go b/listener/reconciler/kcp/reconciler_test.go index da7718a5..8d0da0dd 100644 --- a/listener/reconciler/kcp/reconciler_test.go +++ b/listener/reconciler/kcp/reconciler_test.go @@ -95,18 +95,18 @@ func TestNewKCPReconciler(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := kcp.NewKCPReconciler(tt.appCfg, tt.opts, mockLogger) + reconciler, err := kcp.NewKCPReconciler(tt.appCfg, tt.opts, mockLogger) if tt.wantErr { assert.Error(t, err) if tt.errContains != "" { 
assert.Contains(t, err.Error(), tt.errContains) } - assert.Nil(t, got) + assert.Nil(t, reconciler) } else { assert.NoError(t, err) - assert.NotNil(t, got) - assert.NotNil(t, got.GetManager()) + assert.NotNil(t, reconciler) + assert.NotNil(t, reconciler.GetManager()) } }) } diff --git a/listener/reconciler/kcp/virtual_workspace_test.go b/listener/reconciler/kcp/virtual_workspace_test.go index 43fffcf5..03373ce3 100644 --- a/listener/reconciler/kcp/virtual_workspace_test.go +++ b/listener/reconciler/kcp/virtual_workspace_test.go @@ -1,6 +1,7 @@ package kcp import ( + "context" "errors" "os" "path/filepath" @@ -557,7 +558,7 @@ func TestNewVirtualWorkspaceReconciler(t *testing.T) { appCfg := config.Config{} manager := NewVirtualWorkspaceManager(appCfg) - reconciler := NewVirtualWorkspaceReconciler(manager, nil, nil, nil) + reconciler := NewVirtualWorkspaceReconciler(manager, nil, nil, testlogger.New().Logger) assert.NotNil(t, reconciler) assert.Equal(t, manager, reconciler.virtualWSManager) @@ -604,12 +605,11 @@ func TestVirtualWorkspaceReconciler_ReconcileConfig_Simple(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Unset KUBECONFIG environment variable to avoid reading user's kubeconfig + // Set up test environment where KUBECONFIG is not available oldKubeconfig := os.Getenv("KUBECONFIG") defer os.Setenv("KUBECONFIG", oldKubeconfig) os.Unsetenv("KUBECONFIG") - log := testlogger.New().HideLogOutput().Logger appCfg := config.Config{} appCfg.Url.VirtualWorkspacePrefix = "virtual-workspace" @@ -631,13 +631,13 @@ func TestVirtualWorkspaceReconciler_ReconcileConfig_Simple(t *testing.T) { }, } - reconciler := NewVirtualWorkspaceReconciler(manager, ioHandler, apiResolver, log) + reconciler := NewVirtualWorkspaceReconciler(manager, ioHandler, apiResolver, testlogger.New().Logger) reconciler.currentWorkspaces = tt.initialWorkspaces // For this simplified test, we'll mock the individual methods to avoid network calls // This tests the 
reconciliation logic without testing the full discovery/REST mapper setup - err := reconciler.ReconcileConfig(t.Context(), tt.newConfig) + err := reconciler.ReconcileConfig(context.Background(), tt.newConfig) // Since discovery client creation may fail, we don't assert NoError // but we can still verify the workspace tracking logic @@ -693,12 +693,11 @@ func TestVirtualWorkspaceReconciler_ProcessVirtualWorkspace(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Unset KUBECONFIG environment variable to avoid reading user's kubeconfig + // Set up test environment where KUBECONFIG is not available oldKubeconfig := os.Getenv("KUBECONFIG") defer os.Setenv("KUBECONFIG", oldKubeconfig) os.Unsetenv("KUBECONFIG") - log := testlogger.New().HideLogOutput().Logger appCfg := config.Config{} appCfg.Url.VirtualWorkspacePrefix = "virtual-workspace" @@ -725,9 +724,9 @@ func TestVirtualWorkspaceReconciler_ProcessVirtualWorkspace(t *testing.T) { }, } - reconciler := NewVirtualWorkspaceReconciler(manager, ioHandler, apiResolver, log) + reconciler := NewVirtualWorkspaceReconciler(manager, ioHandler, apiResolver, testlogger.New().Logger) - err := reconciler.processVirtualWorkspace(t.Context(), tt.workspace) + err := reconciler.processVirtualWorkspace(context.Background(), tt.workspace) if tt.expectError { assert.Error(t, err) @@ -762,12 +761,11 @@ func TestVirtualWorkspaceReconciler_RemoveVirtualWorkspace(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Unset KUBECONFIG environment variable to avoid reading user's kubeconfig + // Set up test environment where KUBECONFIG is not available oldKubeconfig := os.Getenv("KUBECONFIG") defer os.Setenv("KUBECONFIG", oldKubeconfig) os.Unsetenv("KUBECONFIG") - log := testlogger.New().HideLogOutput().Logger appCfg := config.Config{} appCfg.Url.VirtualWorkspacePrefix = "virtual-workspace" @@ -786,7 +784,7 @@ func TestVirtualWorkspaceReconciler_RemoveVirtualWorkspace(t *testing.T) { 
}, } - reconciler := NewVirtualWorkspaceReconciler(manager, nil, nil, log) + reconciler := NewVirtualWorkspaceReconciler(manager, nil, nil, testlogger.New().Logger) reconciler.ioHandler = ioHandler err := reconciler.removeVirtualWorkspace(tt.workspaceName) diff --git a/tests/gateway_test/subscription_test.go b/tests/gateway_test/subscription_test.go index f7d594c5..3fc5fc63 100644 --- a/tests/gateway_test/subscription_test.go +++ b/tests/gateway_test/subscription_test.go @@ -204,11 +204,11 @@ func (suite *CommonTestSuite) TestMultiClusterHTTPSubscription() { appCfg := suite.appCfg appCfg.OpenApiDefinitionsPath = tempDir - multiClusterManager, err := manager.NewGateway(suite.T().Context(), suite.log, appCfg) + gatewayService, err := manager.NewGateway(suite.T().Context(), suite.log, appCfg) require.NoError(suite.T(), err) // Start a test server with the multi-cluster manager - testServer := httptest.NewServer(multiClusterManager) + testServer := httptest.NewServer(gatewayService) defer testServer.Close() // Wait a bit for the file watcher to load the cluster diff --git a/tests/gateway_test/suite_test.go b/tests/gateway_test/suite_test.go index 4388ebb0..19120f3d 100644 --- a/tests/gateway_test/suite_test.go +++ b/tests/gateway_test/suite_test.go @@ -17,7 +17,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/graphql-go/graphql" - "github.com/openmfp/golang-commons/logger" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -29,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/openmfp/account-operator/api/v1alpha1" + "github.com/openmfp/golang-commons/logger" appConfig "github.com/openmfp/kubernetes-graphql-gateway/common/config" "github.com/openmfp/kubernetes-graphql-gateway/gateway/manager" "github.com/openmfp/kubernetes-graphql-gateway/gateway/resolver" @@ -135,10 +135,13 @@ func (suite *CommonTestSuite) SetupTest() { }) require.NoError(suite.T(), err) + 
// Create resolver service with the logger pointer + resolverService := resolver.New(suite.log, suite.runtimeClient) + definitions, err := readDefinitionFromFile("./testdata/kubernetes") require.NoError(suite.T(), err) - g, err := schema.New(suite.log, definitions, resolver.New(suite.log, suite.runtimeClient)) + g, err := schema.New(suite.log, definitions, resolverService) require.NoError(suite.T(), err) suite.graphqlSchema = *g.GetSchema() diff --git a/tests/gateway_test/type_by_query_test.go b/tests/gateway_test/type_by_query_test.go index 5a37c825..3e57e0da 100644 --- a/tests/gateway_test/type_by_query_test.go +++ b/tests/gateway_test/type_by_query_test.go @@ -7,27 +7,22 @@ import ( "github.com/go-openapi/spec" "github.com/graphql-go/graphql" - "github.com/openmfp/golang-commons/logger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/openmfp/golang-commons/logger/testlogger" "github.com/openmfp/kubernetes-graphql-gateway/gateway/resolver" "github.com/openmfp/kubernetes-graphql-gateway/gateway/schema" ) func getGateway() (*schema.Gateway, error) { - log, err := logger.New(logger.DefaultConfig()) - if err != nil { - return nil, err - } - // Read the schema file and extract definitions definitions, err := readDefinitionFromFile("./testdata/kubernetes") if err != nil { return nil, err } - return schema.New(log, definitions, resolver.New(log, nil)) + return schema.New(testlogger.New().Logger, definitions, resolver.New(testlogger.New().Logger, nil)) } // readDefinitionFromFile reads OpenAPI definitions from a schema file diff --git a/tests/listener_test/clusteraccess_test/clusteraccess_subroutines_test.go b/tests/listener_test/clusteraccess_test/clusteraccess_subroutines_test.go deleted file mode 100644 index 73617029..00000000 --- a/tests/listener_test/clusteraccess_test/clusteraccess_subroutines_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package clusteraccess_test_test - -import ( - "encoding/base64" - "encoding/json" - "fmt" - 
"os" - "path/filepath" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/openmfp/golang-commons/logger" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - gatewayv1alpha1 "github.com/openmfp/kubernetes-graphql-gateway/common/apis/v1alpha1" - "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/apischema" - "github.com/openmfp/kubernetes-graphql-gateway/listener/pkg/workspacefile" - "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler" - "github.com/openmfp/kubernetes-graphql-gateway/listener/reconciler/clusteraccess" -) - -func TestMain(m *testing.M) { - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) - os.Exit(m.Run()) -} - -type ClusterAccessSubroutinesTestSuite struct { - suite.Suite - - primaryEnv *envtest.Environment - targetEnv *envtest.Environment - primaryCfg *rest.Config - targetCfg *rest.Config - primaryClient client.Client - targetClient client.Client - log *logger.Logger - - tempDir string - ioHandler workspacefile.IOHandler - reconcilerOpts reconciler.ReconcilerOpts - - testNamespace string -} - -func TestClusterAccessSubroutinesTestSuite(t *testing.T) { - suite.Run(t, new(ClusterAccessSubroutinesTestSuite)) -} - -func (suite *ClusterAccessSubroutinesTestSuite) SetupSuite() { - var err error - - // Initialize logger - suite.log, err = logger.New(logger.DefaultConfig()) - require.NoError(suite.T(), err) - - // Create temporary directory for schema files - suite.tempDir, err = os.MkdirTemp("", "clusteraccess-integration-test") - require.NoError(suite.T(), err) - - // Create IO handler - suite.ioHandler, err = workspacefile.NewIOHandler(suite.tempDir) 
- require.NoError(suite.T(), err) -} - -func (suite *ClusterAccessSubroutinesTestSuite) TearDownSuite() { - if suite.tempDir != "" { - os.RemoveAll(suite.tempDir) - } -} - -func (suite *ClusterAccessSubroutinesTestSuite) SetupTest() { - suite.testNamespace = fmt.Sprintf("test-ns-%d", time.Now().UnixNano()) - - // Setup runtime scheme - runtimeScheme := runtime.NewScheme() - utilruntime.Must(corev1.AddToScheme(runtimeScheme)) - utilruntime.Must(gatewayv1alpha1.AddToScheme(runtimeScheme)) - - var err error - - // Setup primary cluster (where listener runs) - suite.primaryEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "..", "config", "crd"), - }, - } - - suite.primaryCfg, err = suite.primaryEnv.Start() - require.NoError(suite.T(), err) - - suite.primaryClient, err = client.New(suite.primaryCfg, client.Options{ - Scheme: runtimeScheme, - }) - require.NoError(suite.T(), err) - - // Setup target cluster (that ClusterAccess points to) - suite.targetEnv = &envtest.Environment{} - suite.targetCfg, err = suite.targetEnv.Start() - require.NoError(suite.T(), err) - - suite.targetClient, err = client.New(suite.targetCfg, client.Options{ - Scheme: runtimeScheme, - }) - require.NoError(suite.T(), err) - - // Create test namespace in both clusters - primaryNs := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: suite.testNamespace, - }, - } - - targetNs := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: suite.testNamespace, - }, - } - - err = suite.primaryClient.Create(suite.T().Context(), primaryNs) - require.NoError(suite.T(), err) - - err = suite.targetClient.Create(suite.T().Context(), targetNs) - require.NoError(suite.T(), err) - - // Setup reconciler options - suite.reconcilerOpts = reconciler.ReconcilerOpts{ - Client: suite.primaryClient, - Config: suite.primaryCfg, - OpenAPIDefinitionsPath: suite.tempDir, - } -} - -func (suite *ClusterAccessSubroutinesTestSuite) TearDownTest() { - if suite.primaryEnv != nil { - 
err := suite.primaryEnv.Stop() - require.NoError(suite.T(), err) - } - - if suite.targetEnv != nil { - err := suite.targetEnv.Stop() - require.NoError(suite.T(), err) - } -} - -func (suite *ClusterAccessSubroutinesTestSuite) TestSubroutine_Process_Success() { - ctx := suite.T().Context() - - // Create target cluster secret with kubeconfig - targetKubeconfig := suite.createKubeconfigForTarget() - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "target-kubeconfig", - Namespace: suite.testNamespace, - }, - Data: map[string][]byte{ - "kubeconfig": targetKubeconfig, - }, - } - - err := suite.primaryClient.Create(ctx, secret) - require.NoError(suite.T(), err) - - // Create ClusterAccess resource - clusterAccess := &gatewayv1alpha1.ClusterAccess{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: suite.testNamespace, - }, - Spec: gatewayv1alpha1.ClusterAccessSpec{ - Host: suite.targetCfg.Host, - Auth: &gatewayv1alpha1.AuthConfig{ - KubeconfigSecretRef: &gatewayv1alpha1.KubeconfigSecretRef{ - Name: "target-kubeconfig", - Namespace: suite.testNamespace, - }, - }, - }, - } - - err = suite.primaryClient.Create(ctx, clusterAccess) - require.NoError(suite.T(), err) - - // Create reconciler and subroutine - reconcilerInstance, err := clusteraccess.NewReconciler( - suite.reconcilerOpts, - suite.ioHandler, - apischema.NewResolver(), - suite.log, - ) - require.NoError(suite.T(), err) - - // Get the subroutine through the testing API - caReconciler := reconcilerInstance.(*clusteraccess.ClusterAccessReconcilerPublic) - subroutine := clusteraccess.NewGenerateSchemaSubroutineForTesting(caReconciler) - - // Process the ClusterAccess resource - result, opErr := subroutine.Process(ctx, clusterAccess) - - // In an integration test environment, we expect the process to execute the business logic - // but it may fail at the final API discovery step due to authentication complexities - // This is acceptable - we're testing that the subroutine processes 
the resource correctly - require.Equal(suite.T(), ctrl.Result{}, result) - - // If the process succeeded completely, verify schema file was created - if opErr == nil { - schemaPath := filepath.Join(suite.tempDir, "test-cluster.json") - require.FileExists(suite.T(), schemaPath) - - schemaContent, err := os.ReadFile(schemaPath) - require.NoError(suite.T(), err) - require.NotEmpty(suite.T(), schemaContent) - require.True(suite.T(), suite.isValidJSON(schemaContent)) - - suite.log.Info().Str("schema", string(schemaContent)).Msg("Generated schema content") - } else { - // If it failed, it should be due to authentication/discovery issues, not business logic - suite.log.Info().Interface("error", opErr).Msg("Process failed as expected in integration test environment") - } -} - -func (suite *ClusterAccessSubroutinesTestSuite) TestSubroutine_Process_InvalidClusterAccess() { - ctx := suite.T().Context() - - // Create reconciler and subroutine - reconcilerInstance, err := clusteraccess.NewReconciler( - suite.reconcilerOpts, - suite.ioHandler, - apischema.NewResolver(), - suite.log, - ) - require.NoError(suite.T(), err) - - caReconciler := reconcilerInstance.(*clusteraccess.ClusterAccessReconcilerPublic) - subroutine := clusteraccess.NewGenerateSchemaSubroutineForTesting(caReconciler) - - // Try to process invalid resource type - invalidResource := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-resource", - }, - } - - result, opErr := subroutine.Process(ctx, invalidResource) - - // Verify error handling - require.NotNil(suite.T(), opErr) - require.Equal(suite.T(), ctrl.Result{}, result) -} - -func (suite *ClusterAccessSubroutinesTestSuite) TestSubroutine_Process_MissingSecret() { - ctx := suite.T().Context() - - // Create ClusterAccess resource pointing to non-existent secret - clusterAccess := &gatewayv1alpha1.ClusterAccess{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster-missing-secret", - Namespace: suite.testNamespace, - }, - Spec: 
gatewayv1alpha1.ClusterAccessSpec{ - Host: suite.targetCfg.Host, - Auth: &gatewayv1alpha1.AuthConfig{ - KubeconfigSecretRef: &gatewayv1alpha1.KubeconfigSecretRef{ - Name: "non-existent-secret", - Namespace: suite.testNamespace, - }, - }, - }, - } - - err := suite.primaryClient.Create(ctx, clusterAccess) - require.NoError(suite.T(), err) - - // Create reconciler and subroutine - reconcilerInstance, err := clusteraccess.NewReconciler( - suite.reconcilerOpts, - suite.ioHandler, - apischema.NewResolver(), - suite.log, - ) - require.NoError(suite.T(), err) - - caReconciler := reconcilerInstance.(*clusteraccess.ClusterAccessReconcilerPublic) - subroutine := clusteraccess.NewGenerateSchemaSubroutineForTesting(caReconciler) - - // Process the ClusterAccess resource - result, opErr := subroutine.Process(ctx, clusterAccess) - - // Verify error handling - require.NotNil(suite.T(), opErr) - require.Equal(suite.T(), ctrl.Result{}, result) -} - -func (suite *ClusterAccessSubroutinesTestSuite) TestSubroutine_Lifecycle_Methods() { - ctx := suite.T().Context() - - // Create reconciler and subroutine - reconcilerInstance, err := clusteraccess.NewReconciler( - suite.reconcilerOpts, - suite.ioHandler, - apischema.NewResolver(), - suite.log, - ) - require.NoError(suite.T(), err) - - caReconciler := reconcilerInstance.(*clusteraccess.ClusterAccessReconcilerPublic) - subroutine := clusteraccess.NewGenerateSchemaSubroutineForTesting(caReconciler) - - // Test GetName - require.Equal(suite.T(), "generate-schema", subroutine.GetName()) - - // Test Finalizers - finalizers := subroutine.Finalizers() - require.Nil(suite.T(), finalizers) - - // Test Finalize - clusterAccess := &gatewayv1alpha1.ClusterAccess{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-finalize", - }, - } - - result, opErr := subroutine.Finalize(ctx, clusterAccess) - require.Nil(suite.T(), opErr) - require.Equal(suite.T(), ctrl.Result{}, result) -} - -// Helper methods - -func (suite *ClusterAccessSubroutinesTestSuite) 
createKubeconfigForTarget() []byte { - // Create kubeconfig with the same auth as the target rest.Config - clusterSection := fmt.Sprintf(` server: %s - insecure-skip-tls-verify: true`, suite.targetCfg.Host) - - // Add certificate authority data if available - if len(suite.targetCfg.CAData) > 0 { - clusterSection = fmt.Sprintf(` server: %s - certificate-authority-data: %s`, suite.targetCfg.Host, base64.StdEncoding.EncodeToString(suite.targetCfg.CAData)) - } - - userSection := "" - if suite.targetCfg.BearerToken != "" { - userSection = fmt.Sprintf(` token: %s`, suite.targetCfg.BearerToken) - } else if len(suite.targetCfg.CertData) > 0 && len(suite.targetCfg.KeyData) > 0 { - userSection = fmt.Sprintf(` client-certificate-data: %s - client-key-data: %s`, - base64.StdEncoding.EncodeToString(suite.targetCfg.CertData), - base64.StdEncoding.EncodeToString(suite.targetCfg.KeyData)) - } else { - // Fallback - this might not work but let's try - userSection = ` token: test-token` - } - - kubeconfig := fmt.Sprintf(`apiVersion: v1 -kind: Config -clusters: -- cluster: -%s - name: target-cluster -contexts: -- context: - cluster: target-cluster - user: target-user - namespace: default - name: target-context -current-context: target-context -users: -- name: target-user - user: -%s -`, clusterSection, userSection) - - return []byte(kubeconfig) -} - -func (suite *ClusterAccessSubroutinesTestSuite) isValidJSON(data []byte) bool { - var js interface{} - return json.Unmarshal(data, &js) == nil -}