This repository was archived by the owner on Oct 6, 2025. It is now read-only.

Commit 58363ad

Track ownerrefs between cluster-scoped and namespaced objects
Signed-off-by: Jonathan Ogilvie <[email protected]>
1 parent: dab4cc0 · commit: 58363ad
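For context, the shape of relationship this commit teaches the cache to track is sketched below. This is a minimal, self-contained illustration rather than code from this repository; the names mirror the Crossplane fixtures used in the new test: a cluster-scoped ProviderRevision that owns a Deployment living in the crossplane-system namespace.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// ownerReference carried by a namespaced Deployment: it names a
	// cluster-scoped ProviderRevision, so the owner has no namespace and a
	// lookup keyed on the child's namespace will never find it.
	ownerRef := metav1.OwnerReference{
		APIVersion: "pkg.crossplane.io/v1",
		Kind:       "ProviderRevision",
		Name:       "provider-aws-cloudformation-3b2c213545b8",
	}
	fmt.Printf("Deployment in %q owned by cluster-scoped %s %q\n",
		"crossplane-system", ownerRef.Kind, ownerRef.Name)
}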

File tree: 2 files changed, +150 −4 lines


pkg/cache/cluster.go

Lines changed: 52 additions & 3 deletions
@@ -1065,7 +1065,7 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
 	}
 	for namespace, namespaceKeys := range keysPerNamespace {
 		nsNodes := c.nsIndex[namespace]
-		graph := buildGraph(nsNodes)
+		graph := buildGraph(nsNodes, c.resources)
 		visited := make(map[kube.ResourceKey]int)
 		for _, key := range namespaceKeys {
 			visited[key] = 0
@@ -1095,7 +1095,7 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
 	}
 }
 
-func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map[types.UID]*Resource {
+func buildGraph(nsNodes map[kube.ResourceKey]*Resource, allResources map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map[types.UID]*Resource {
 	// Prepare to construct a graph
 	nodesByUID := make(map[types.UID][]*Resource, len(nsNodes))
 	for _, node := range nsNodes {
@@ -1106,6 +1106,7 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
 	graph := make(map[kube.ResourceKey]map[types.UID]*Resource)
 
 	// Loop through all nodes, calling each one "childNode," because we're only bothering with it if it has a parent.
+	// First process nodes in the current namespace
 	for _, childNode := range nsNodes {
 		for i, ownerRef := range childNode.OwnerRefs {
 			// First, backfill UID of inferred owner child references.
@@ -1115,7 +1116,16 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
 				// APIVersion is invalid, so we couldn't find the parent.
 				continue
 			}
-			graphKeyNode, ok := nsNodes[kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: childNode.Ref.Namespace, Name: ownerRef.Name}]
+			// Try same-namespace lookup first (preserves existing behavior)
+			sameNSKey := kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: childNode.Ref.Namespace, Name: ownerRef.Name}
+			graphKeyNode, ok := nsNodes[sameNSKey]
+
+			// If not found and we have cross-namespace capabilities, try cluster-scoped lookup
+			if !ok && allResources != nil {
+				clusterScopedKey := kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: "", Name: ownerRef.Name}
+				graphKeyNode, ok = allResources[clusterScopedKey]
+			}
+
 			if !ok {
 				// No resource found with the given graph key, so move on.
 				continue
@@ -1126,6 +1136,18 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
 
 			// Now that we have the UID of the parent, update the graph.
 			uidNodes, ok := nodesByUID[ownerRef.UID]
+			if !ok && allResources != nil {
+				// If parent not found in current namespace, check if it exists in allResources
+				// and create a temporary uidNodes list for cross-namespace relationships
+				for _, parentCandidate := range allResources {
+					if parentCandidate.Ref.UID == ownerRef.UID {
+						uidNodes = []*Resource{parentCandidate}
+						ok = true
+						break
+					}
+				}
+			}
+
 			if ok {
 				for _, uidNode := range uidNodes {
 					// Update the graph for this owner to include the child.
@@ -1148,6 +1170,33 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
 			}
 		}
 	}
+
+	// Second pass: process cross-namespace children if allResources is provided
+	if allResources != nil {
+		for _, childNode := range allResources {
+			// Skip if already processed in the current namespace
+			if _, exists := nsNodes[childNode.ResourceKey()]; exists {
+				continue
+			}
+
+			// Check if this child has a parent in the current namespace
+			for _, ownerRef := range childNode.OwnerRefs {
+				group, err := schema.ParseGroupVersion(ownerRef.APIVersion)
+				if err != nil {
+					continue
+				}
+				parentKey := kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: "", Name: ownerRef.Name}
+				if parentNode, exists := nsNodes[parentKey]; exists {
+					// Found a cross-namespace relationship
+					if _, ok := graph[parentNode.ResourceKey()]; !ok {
+						graph[parentNode.ResourceKey()] = make(map[types.UID]*Resource)
+					}
+					graph[parentNode.ResourceKey()][childNode.Ref.UID] = childNode
+				}
+			}
+		}
+	}
+
 	return graph
 }

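To summarize the change above: buildGraph now resolves each ownerRef in two steps, and separately falls back to a linear scan of allResources when an owner UID has no match in the current namespace (that scan is O(N) per unresolved reference, which is worth keeping in mind for large caches). The sketch below restates only the two-step key lookup; it reuses the package's kube.ResourceKey and Resource types for illustration and is not the repository's exact code.

// Hedged sketch of the two-step owner lookup introduced above; the function
// name lookupOwner is illustrative, not part of the package.
func lookupOwner(nsNodes, allResources map[kube.ResourceKey]*Resource,
	child *Resource, group string, ownerRef metav1.OwnerReference) (*Resource, bool) {
	// Step 1: same-namespace lookup, preserving the pre-existing behavior.
	if owner, ok := nsNodes[kube.ResourceKey{
		Group: group, Kind: ownerRef.Kind,
		Namespace: child.Ref.Namespace, Name: ownerRef.Name,
	}]; ok {
		return owner, true
	}
	// Step 2: retry with an empty namespace, i.e. a cluster-scoped key,
	// across the whole cache; only attempted when allResources was supplied.
	if allResources == nil {
		return nil, false
	}
	owner, ok := allResources[kube.ResourceKey{
		Group: group, Kind: ownerRef.Kind, Namespace: "", Name: ownerRef.Name,
	}]
	return owner, ok
}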
pkg/cache/cluster_test.go

Lines changed: 98 additions & 1 deletion
@@ -24,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/dynamic/fake"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -1157,6 +1158,102 @@ func TestIterateHierachyV2(t *testing.T) {
 	})
 }
 
+// TestIterateHierarchyV2_CrossNamespaceOwnerReference tests that cross-namespace
+// owner references work correctly, specifically cluster-scoped resources with
+// namespaced children
+func TestIterateHierarchyV2_CrossNamespaceOwnerReference(t *testing.T) {
+	cluster := newCluster(t)
+
+	// Create cluster-scoped parent resource (ProviderRevision)
+	parentUID := types.UID("parent-uid-123")
+	clusterScopedParent := &Resource{
+		Ref: corev1.ObjectReference{
+			APIVersion: "pkg.crossplane.io/v1",
+			Kind:       "ProviderRevision",
+			Name:       "provider-aws-cloudformation-3b2c213545b8",
+			UID:        parentUID,
+			// No namespace = cluster-scoped
+		},
+		OwnerRefs: []metav1.OwnerReference{},
+	}
+
+	// Create namespaced child with ownerReference to cluster-scoped parent
+	namespacedChild := &Resource{
+		Ref: corev1.ObjectReference{
+			APIVersion: "apps/v1",
+			Kind:       "Deployment",
+			Name:       "provider-aws-cloudformation-3b2c213545b8",
+			Namespace:  "crossplane-system",
+			UID:        types.UID("child-uid-456"),
+		},
+		OwnerRefs: []metav1.OwnerReference{{
+			APIVersion: "pkg.crossplane.io/v1",
+			Kind:       "ProviderRevision",
+			Name:       "provider-aws-cloudformation-3b2c213545b8",
+			// UID: parentUID, // Don't set UID - let it be resolved via cross-namespace lookup
+		}},
+	}
+
+	// Create cluster-scoped child with ownerReference to cluster-scoped parent (this should work already)
+	clusterScopedChild := &Resource{
+		Ref: corev1.ObjectReference{
+			APIVersion: "rbac.authorization.k8s.io/v1",
+			Kind:       "ClusterRole",
+			Name:       "crossplane:provider:provider-aws-cloudformation-3b2c213545b8:system",
+			UID:        types.UID("child-uid-789"),
+			// No namespace = cluster-scoped
+		},
+		OwnerRefs: []metav1.OwnerReference{{
+			APIVersion: "pkg.crossplane.io/v1",
+			Kind:       "ProviderRevision",
+			Name:       "provider-aws-cloudformation-3b2c213545b8",
+			UID:        parentUID,
+		}},
+	}
+
+	// Add all resources to cluster cache
+	cluster.setNode(clusterScopedParent)
+	cluster.setNode(namespacedChild)
+	cluster.setNode(clusterScopedChild)
+
+	// Test hierarchy traversal starting from cluster-scoped parent
+	var visitedResources []*Resource
+	cluster.IterateHierarchyV2(
+		[]kube.ResourceKey{clusterScopedParent.ResourceKey()},
+		func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
+			visitedResources = append(visitedResources, resource)
+			return true
+		},
+	)
+
+	// Should visit parent + both children (3 resources)
+	assert.Equal(t, 3, len(visitedResources), "Should visit parent and both children")
+
+	visitedNames := make([]string, len(visitedResources))
+	for i, res := range visitedResources {
+		visitedNames[i] = res.Ref.Name
+	}
+
+	// Check we have the expected resources by type and namespace combination
+	foundParent := false
+	foundClusterChild := false
+	foundNamespacedChild := false
+
+	for _, res := range visitedResources {
+		if res.Ref.Kind == "ProviderRevision" && res.Ref.Namespace == "" {
+			foundParent = true
+		} else if res.Ref.Kind == "ClusterRole" && res.Ref.Namespace == "" {
+			foundClusterChild = true
+		} else if res.Ref.Kind == "Deployment" && res.Ref.Namespace == "crossplane-system" {
+			foundNamespacedChild = true
+		}
+	}
+
+	assert.True(t, foundParent, "Should visit ProviderRevision parent")
+	assert.True(t, foundClusterChild, "Should visit ClusterRole child")
+	assert.True(t, foundNamespacedChild, "Should visit Deployment child (this tests the fix)")
+}
+
 // Test_watchEvents_Deadlock validates that starting watches will not create a deadlock
 // caused by using improper locking in various callback methods when there is a high load on the
 // system.
@@ -1273,7 +1370,7 @@ func BenchmarkBuildGraph(b *testing.B) {
 	testResources := buildTestResourceMap()
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		buildGraph(testResources)
+		buildGraph(testResources, nil)
 	}
 }
 
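Note on compatibility: callers that do not need cross-namespace tracking can pass nil for the new allResources parameter, as the updated benchmark does with buildGraph(testResources, nil). Every new code path in buildGraph is guarded by allResources != nil, so a nil argument reduces the function to its previous single-namespace behavior.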