147 changes: 2 additions & 145 deletions pkg/kubernetes/provider.go
@@ -2,7 +2,6 @@ package kubernetes
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/containers/kubernetes-mcp-server/pkg/config"
 	"k8s.io/client-go/discovery/cached/memory"
@@ -12,10 +11,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 )
 
-const (
-	KubeConfigTargetParameterName = "context"
-)
-
 type ManagerProvider interface {
 	GetTargets(ctx context.Context) ([]string, error)
 	GetManagerFor(ctx context.Context, target string) (*Manager, error)
@@ -25,158 +20,20 @@ type ManagerProvider interface {
 	Close()
 }
 
-type kubeConfigClusterProvider struct {
-	defaultContext string
-	managers       map[string]*Manager
-}
-
-var _ ManagerProvider = &kubeConfigClusterProvider{}
-
-type singleClusterProvider struct {
-	strategy string
-	manager  *Manager
-}
-
-var _ ManagerProvider = &singleClusterProvider{}
-
 func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
 	m, err := NewManager(cfg)
 	if err != nil {
 		return nil, err
 	}
 
 	strategy := resolveStrategy(cfg, m)
-	switch strategy {
-	case config.ClusterProviderKubeConfig:
-		return newKubeConfigClusterProvider(m)
-	case config.ClusterProviderInCluster, config.ClusterProviderDisabled:
-		return newSingleClusterProvider(m, strategy)
-	default:
-		return nil, fmt.Errorf(
-			"invalid ClusterProviderStrategy '%s', must be 'kubeconfig', 'in-cluster', or 'disabled'",
-			strategy,
-		)
-	}
-}
-
-func newKubeConfigClusterProvider(m *Manager) (*kubeConfigClusterProvider, error) {
-	// Handle in-cluster mode
-	if m.IsInCluster() {
-		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
-	}
-
-	rawConfig, err := m.clientCmdConfig.RawConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	allClusterManagers := map[string]*Manager{
-		rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
-	}
-
-	for name := range rawConfig.Contexts {
-		if name == rawConfig.CurrentContext {
-			continue // already initialized this, don't want to set it to nil
-		}
-
-		allClusterManagers[name] = nil
-	}
-
-	return &kubeConfigClusterProvider{
-		defaultContext: rawConfig.CurrentContext,
-		managers:       allClusterManagers,
-	}, nil
-}
-
-func newSingleClusterProvider(m *Manager, strategy string) (*singleClusterProvider, error) {
-	if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
-		return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
-	}
-
-	return &singleClusterProvider{
-		manager:  m,
-		strategy: strategy,
-	}, nil
-}
-
-func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
-	contextNames := make([]string, 0, len(k.managers))
-	for cluster := range k.managers {
-		contextNames = append(contextNames, cluster)
-	}
-
-	return contextNames, nil
-}
-
-func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
-	return KubeConfigTargetParameterName
-}
-
-func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
-	m, ok := k.managers[context]
-	if ok && m != nil {
-		return m, nil
-	}
-
-	baseManager := k.managers[k.defaultContext]
-
-	if baseManager.IsInCluster() {
-		// In cluster mode, so context switching is not applicable
-		return baseManager, nil
-	}
-
-	m, err := baseManager.newForContext(context)
+	factory, err := getProviderFactory(strategy)
 	if err != nil {
 		return nil, err
 	}
 
-	k.managers[context] = m
-
-	return m, nil
-}
-
-func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
-	return k.defaultContext
-}
-
-func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
-	m := k.managers[k.defaultContext]
-
-	m.WatchKubeConfig(onKubeConfigChanged)
-}
-
-func (k *kubeConfigClusterProvider) Close() {
-	m := k.managers[k.defaultContext]
-
-	m.Close()
-}
-
-func (s *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
-	return []string{""}, nil
-}
-
-func (s *singleClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
-	if target != "" {
-		return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", s.strategy)
-	}
-
-	return s.manager, nil
-}
-
-func (s *singleClusterProvider) GetDefaultTarget() string {
-	return ""
-}
-
-func (s *singleClusterProvider) GetTargetParameterName() string {
-	return ""
-}
-
-func (s *singleClusterProvider) WatchTargets(watch func() error) {
-	s.manager.WatchKubeConfig(watch)
-}
-
-func (s *singleClusterProvider) Close() {
-	s.manager.Close()
+	return factory(m, cfg)
 }
 
 func (m *Manager) newForContext(context string) (*Manager, error) {
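Assembled from the hunks above for readability, the refactored entry point now reads:

func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
	m, err := NewManager(cfg)
	if err != nil {
		return nil, err
	}

	strategy := resolveStrategy(cfg, m)
	factory, err := getProviderFactory(strategy)
	if err != nil {
		return nil, err
	}

	return factory(m, cfg)
}

The hard-coded switch over strategies is gone: an unknown strategy now fails inside getProviderFactory, whose error message lists the registered strategies instead of a fixed set.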
109 changes: 109 additions & 0 deletions pkg/kubernetes/provider_kubeconfig.go
@@ -0,0 +1,109 @@
package kubernetes

import (
	"context"
	"fmt"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"

// kubeConfigClusterProvider implements ManagerProvider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
	defaultContext string
	managers       map[string]*Manager
}

var _ ManagerProvider = &kubeConfigClusterProvider{}

func init() {
	RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}

// newKubeConfigClusterProvider creates a provider that manages multiple clusters
// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
	// Handle in-cluster mode
	if m.IsInCluster() {
		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
	}

	rawConfig, err := m.clientCmdConfig.RawConfig()
	if err != nil {
		return nil, err
	}

	allClusterManagers := map[string]*Manager{
		rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
	}

	for name := range rawConfig.Contexts {
		if name == rawConfig.CurrentContext {
			continue // already initialized this, don't want to set it to nil
		}

		allClusterManagers[name] = nil
	}

	return &kubeConfigClusterProvider{
		defaultContext: rawConfig.CurrentContext,
		managers:       allClusterManagers,
	}, nil
}

func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
	contextNames := make([]string, 0, len(k.managers))
	for cluster := range k.managers {
		contextNames = append(contextNames, cluster)
	}

	return contextNames, nil
}

func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
	return KubeConfigTargetParameterName
}

func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
	m, ok := k.managers[context]
	if ok && m != nil {
		return m, nil
	}

	baseManager := k.managers[k.defaultContext]

	if baseManager.IsInCluster() {
		// In cluster mode, so context switching is not applicable
		return baseManager, nil
	}

	m, err := baseManager.newForContext(context)
	if err != nil {
		return nil, err
	}

	k.managers[context] = m

	return m, nil
}

func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
	return k.defaultContext
}

func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
	m := k.managers[k.defaultContext]

	m.WatchKubeConfig(onKubeConfigChanged)
}

func (k *kubeConfigClusterProvider) Close() {
	m := k.managers[k.defaultContext]

	m.Close()
}
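A minimal usage sketch of the lazy initialization in GetManagerFor. Assumptions are flagged in the comments: how StaticConfig is normally constructed is not shown in this PR, and the "prod" context name is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

func main() {
	// Assumption: a zero-value StaticConfig resolves to the kubeconfig
	// strategy when running outside a cluster; adjust to however the
	// server actually builds its config.
	cfg := &config.StaticConfig{}

	provider, err := kubernetes.NewManagerProvider(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	// One target per kubeconfig context, including the current one.
	targets, err := provider.GetTargets(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kubeconfig contexts:", targets)

	// "prod" is a placeholder context name. The first call builds a Manager
	// via newForContext and caches it; later calls return the cached one.
	m, err := provider.GetManagerFor(context.Background(), "prod")
	if err != nil {
		log.Fatal(err)
	}
	_ = m
}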
47 changes: 47 additions & 0 deletions pkg/kubernetes/provider_registry.go
@@ -0,0 +1,47 @@
package kubernetes

import (
	"fmt"
	"sort"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

// ProviderFactory creates a new ManagerProvider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error)

var providerFactories = make(map[string]ProviderFactory)

// RegisterProvider registers a provider factory for a given strategy name.
// This should be called from init() functions in provider implementation files.
// Panics if a provider is already registered for the given strategy.
func RegisterProvider(strategy string, factory ProviderFactory) {
	if _, exists := providerFactories[strategy]; exists {
		panic(fmt.Sprintf("provider already registered for strategy '%s'", strategy))
	}
	providerFactories[strategy] = factory
}
Comment on lines +15 to +25 (Member):
❤️ This is exactly what I was thinking of
// getProviderFactory retrieves a registered provider factory by strategy name.
// Returns an error if no provider is registered for the given strategy.
func getProviderFactory(strategy string) (ProviderFactory, error) {
	factory, ok := providerFactories[strategy]
	if !ok {
		available := GetRegisteredStrategies()
		return nil, fmt.Errorf("no provider registered for strategy '%s', available strategies: %v", strategy, available)
	}
	return factory, nil
}

// GetRegisteredStrategies returns a sorted list of all registered strategy names.
// This is useful for error messages and debugging.
func GetRegisteredStrategies() []string {
	strategies := make([]string, 0, len(providerFactories))
	for strategy := range providerFactories {
		strategies = append(strategies, strategy)
	}
	sort.Strings(strategies)
	return strategies
}
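To illustrate what the registry buys, a hypothetical third strategy could plug in from its own file without touching provider.go. Everything below is an illustration, not part of this PR: the "static" strategy name and staticProvider type are made up, loosely mirroring the singleClusterProvider removed from provider.go.

package kubernetes

import (
	"context"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

// staticProvider is a hypothetical provider that always serves one Manager.
type staticProvider struct{ m *Manager }

var _ ManagerProvider = &staticProvider{}

func init() {
	// "static" is a made-up strategy name; a duplicate registration panics.
	RegisterProvider("static", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
		return &staticProvider{m: m}, nil
	})
}

func (s *staticProvider) GetTargets(ctx context.Context) ([]string, error) {
	return []string{""}, nil
}

func (s *staticProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
	return s.m, nil
}

func (s *staticProvider) GetDefaultTarget() string        { return "" }
func (s *staticProvider) GetTargetParameterName() string  { return "" }
func (s *staticProvider) WatchTargets(watch func() error) { s.m.WatchKubeConfig(watch) }
func (s *staticProvider) Close()                          { s.m.Close() }

Because registration happens in init(), importing the file is enough to make the strategy available to getProviderFactory and to NewManagerProvider's dispatch.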