
Commit 6663f2e

Merge pull request #3273 from sttts/sttts-mount-fix-index
🐛 index: fix how error codes are stored for unavailable workspaces
2 parents a1b3400 + e3ec087 commit 6663f2e

File tree

4 files changed: +111 -71 lines changed

  pkg/index/index.go
  pkg/index/index_test.go
  test/e2e/authorizer/serviceaccounts_test.go
  test/e2e/mounts/mounts_machinery_test.go
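In short, the index keeps a nested map of HTTP error codes for workspaces whose logical cluster is unavailable, and this commit aligns how that map is written and read: the 503 is stored under the shard and logical cluster that contain the Workspace object itself (its parent), keyed by the workspace name, and the path lookup now consults the map with those same parent keys before resolving to the target cluster. A minimal sketch of the storage side, reduced from the UpsertWorkspace hunk below (recordUnavailable is a hypothetical helper for illustration only, and the logicalcluster import path is assumed):

package index

import "github.com/kcp-dev/logicalcluster/v3"

// State is abbreviated to the one map this commit is about:
// (shard name, parent logical cluster, workspace name) -> HTTP error code.
type State struct {
	shardClusterWorkspaceNameErrorCode map[string]map[logicalcluster.Name]map[string]int
}

// recordUnavailable mirrors what UpsertWorkspace does for a workspace in phase
// Unavailable: store 503 under the parent cluster (where the Workspace object
// lives), not under ws.Spec.Cluster as before this commit.
func (c *State) recordUnavailable(shard string, parent logicalcluster.Name, wsName string) {
	if c.shardClusterWorkspaceNameErrorCode[shard] == nil {
		c.shardClusterWorkspaceNameErrorCode[shard] = map[logicalcluster.Name]map[string]int{}
	}
	if c.shardClusterWorkspaceNameErrorCode[shard][parent] == nil {
		c.shardClusterWorkspaceNameErrorCode[shard][parent] = map[string]int{}
	}
	c.shardClusterWorkspaceNameErrorCode[shard][parent][wsName] = 503 // unavailable workspaces should return 503
}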

pkg/index/index.go

Lines changed: 41 additions & 42 deletions
@@ -53,11 +53,11 @@ func New(rewriters []PathRewriter) *State {
 	return &State{
 		rewriters: rewriters,
 
-		clusterShards:             map[logicalcluster.Name]string{},
-		shardWorkspaceNameCluster: map[string]map[logicalcluster.Name]map[string]logicalcluster.Name{},
-		shardWorkspaceName:        map[string]map[logicalcluster.Name]string{},
-		shardClusterParentCluster: map[string]map[logicalcluster.Name]logicalcluster.Name{},
-		shardBaseURLs:             map[string]string{},
+		clusterShards:                    map[logicalcluster.Name]string{},
+		shardClusterWorkspaceNameCluster: map[string]map[logicalcluster.Name]map[string]logicalcluster.Name{},
+		shardClusterWorkspaceName:        map[string]map[logicalcluster.Name]string{},
+		shardClusterParentCluster:        map[string]map[logicalcluster.Name]logicalcluster.Name{},
+		shardBaseURLs:                    map[string]string{},
 
 		// Experimental feature: allow mounts to be used with Workspaces
 		// structure: (shard, logical cluster, workspace name) -> string serialized mount objects
 		// This should be simplified once we promote this to workspace structure.
@@ -75,12 +75,12 @@ func New(rewriters []PathRewriter) *State {
 type State struct {
 	rewriters []PathRewriter
 
-	lock                      sync.RWMutex
-	clusterShards             map[logicalcluster.Name]string                                     // logical cluster -> shard name
-	shardWorkspaceNameCluster map[string]map[logicalcluster.Name]map[string]logicalcluster.Name  // (shard name, logical cluster, workspace name) -> logical cluster
-	shardWorkspaceName        map[string]map[logicalcluster.Name]string                          // (shard name, logical cluster) -> workspace name
-	shardClusterParentCluster map[string]map[logicalcluster.Name]logicalcluster.Name             // (shard name, logical cluster) -> parent logical cluster
-	shardBaseURLs             map[string]string                                                  // shard name -> base URL
+	lock                             sync.RWMutex
+	clusterShards                    map[logicalcluster.Name]string                                     // logical cluster -> shard name
+	shardClusterWorkspaceNameCluster map[string]map[logicalcluster.Name]map[string]logicalcluster.Name  // (shard name, logical cluster, workspace name) -> logical cluster
+	shardClusterWorkspaceName        map[string]map[logicalcluster.Name]string                          // (shard name, logical cluster) -> workspace name
+	shardClusterParentCluster        map[string]map[logicalcluster.Name]logicalcluster.Name             // (shard name, logical cluster) -> parent logical cluster
+	shardBaseURLs                    map[string]string                                                  // shard name -> base URL
 	// Experimental feature: allow mounts to be used with Workspaces
 	shardClusterWorkspaceMountAnnotation map[string]map[logicalcluster.Name]map[string]string // (shard name, logical cluster, workspace name) -> mount object string
 
@@ -103,29 +103,29 @@ func (c *State) UpsertWorkspace(shard string, ws *tenancyv1alpha1.Workspace) {
 		if c.shardClusterWorkspaceNameErrorCode[shard] == nil {
 			c.shardClusterWorkspaceNameErrorCode[shard] = map[logicalcluster.Name]map[string]int{}
 		}
-		if c.shardClusterWorkspaceNameErrorCode[shard][logicalcluster.Name(ws.Spec.Cluster)] == nil {
-			c.shardClusterWorkspaceNameErrorCode[shard][logicalcluster.Name(ws.Spec.Cluster)] = map[string]int{}
+		if c.shardClusterWorkspaceNameErrorCode[shard][clusterName] == nil {
+			c.shardClusterWorkspaceNameErrorCode[shard][clusterName] = map[string]int{}
 		}
 		// Unavailable workspaces should return 503
-		c.shardClusterWorkspaceNameErrorCode[shard][logicalcluster.Name(ws.Spec.Cluster)][ws.Name] = 503
+		c.shardClusterWorkspaceNameErrorCode[shard][clusterName][ws.Name] = 503
 	} else {
-		delete(c.shardClusterWorkspaceNameErrorCode[shard][logicalcluster.Name(ws.Spec.Cluster)], ws.Name)
-		if len(c.shardClusterWorkspaceNameErrorCode[shard][logicalcluster.Name(ws.Spec.Cluster)]) == 0 {
-			delete(c.shardClusterWorkspaceNameErrorCode[shard], logicalcluster.Name(ws.Spec.Cluster))
+		delete(c.shardClusterWorkspaceNameErrorCode[shard][clusterName], ws.Name)
+		if len(c.shardClusterWorkspaceNameErrorCode[shard][clusterName]) == 0 {
+			delete(c.shardClusterWorkspaceNameErrorCode[shard], clusterName)
 		}
 	}
 
-	if cluster := c.shardWorkspaceNameCluster[shard][clusterName][ws.Name]; cluster.String() != ws.Spec.Cluster {
-		if c.shardWorkspaceNameCluster[shard] == nil {
-			c.shardWorkspaceNameCluster[shard] = map[logicalcluster.Name]map[string]logicalcluster.Name{}
-			c.shardWorkspaceName[shard] = map[logicalcluster.Name]string{}
+	if cluster := c.shardClusterWorkspaceNameCluster[shard][clusterName][ws.Name]; cluster.String() != ws.Spec.Cluster {
+		if c.shardClusterWorkspaceNameCluster[shard] == nil {
+			c.shardClusterWorkspaceNameCluster[shard] = map[logicalcluster.Name]map[string]logicalcluster.Name{}
+			c.shardClusterWorkspaceName[shard] = map[logicalcluster.Name]string{}
 			c.shardClusterParentCluster[shard] = map[logicalcluster.Name]logicalcluster.Name{}
 		}
-		if c.shardWorkspaceNameCluster[shard][clusterName] == nil {
-			c.shardWorkspaceNameCluster[shard][clusterName] = map[string]logicalcluster.Name{}
+		if c.shardClusterWorkspaceNameCluster[shard][clusterName] == nil {
+			c.shardClusterWorkspaceNameCluster[shard][clusterName] = map[string]logicalcluster.Name{}
 		}
-		c.shardWorkspaceNameCluster[shard][clusterName][ws.Name] = logicalcluster.Name(ws.Spec.Cluster)
-		c.shardWorkspaceName[shard][logicalcluster.Name(ws.Spec.Cluster)] = ws.Name
+		c.shardClusterWorkspaceNameCluster[shard][clusterName][ws.Name] = logicalcluster.Name(ws.Spec.Cluster)
+		c.shardClusterWorkspaceName[shard][logicalcluster.Name(ws.Spec.Cluster)] = ws.Name
 		c.shardClusterParentCluster[shard][logicalcluster.Name(ws.Spec.Cluster)] = clusterName
 	}
 
@@ -144,7 +144,7 @@ func (c *State) DeleteWorkspace(shard string, ws *tenancyv1alpha1.Workspace) {
 	clusterName := logicalcluster.From(ws)
 
 	c.lock.RLock()
-	_, foundCluster := c.shardWorkspaceNameCluster[shard][clusterName][ws.Name]
+	_, foundCluster := c.shardClusterWorkspaceNameCluster[shard][clusterName][ws.Name]
 	_, foundMount := c.shardClusterWorkspaceMountAnnotation[shard][clusterName][ws.Name]
 	c.lock.RUnlock()
 
@@ -155,18 +155,18 @@ func (c *State) DeleteWorkspace(shard string, ws *tenancyv1alpha1.Workspace) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
-	if _, foundCluster = c.shardWorkspaceNameCluster[shard][clusterName][ws.Name]; foundCluster {
-		delete(c.shardWorkspaceNameCluster[shard][clusterName], ws.Name)
-		if len(c.shardWorkspaceNameCluster[shard][clusterName]) == 0 {
-			delete(c.shardWorkspaceNameCluster[shard], clusterName)
+	if _, foundCluster = c.shardClusterWorkspaceNameCluster[shard][clusterName][ws.Name]; foundCluster {
+		delete(c.shardClusterWorkspaceNameCluster[shard][clusterName], ws.Name)
+		if len(c.shardClusterWorkspaceNameCluster[shard][clusterName]) == 0 {
+			delete(c.shardClusterWorkspaceNameCluster[shard], clusterName)
 		}
-		if len(c.shardWorkspaceNameCluster[shard]) == 0 {
-			delete(c.shardWorkspaceNameCluster, shard)
+		if len(c.shardClusterWorkspaceNameCluster[shard]) == 0 {
+			delete(c.shardClusterWorkspaceNameCluster, shard)
 		}
 
-		delete(c.shardWorkspaceName[shard], logicalcluster.Name(ws.Spec.Cluster))
-		if len(c.shardWorkspaceName[shard]) == 0 {
-			delete(c.shardWorkspaceName, shard)
+		delete(c.shardClusterWorkspaceName[shard], logicalcluster.Name(ws.Spec.Cluster))
+		if len(c.shardClusterWorkspaceName[shard]) == 0 {
+			delete(c.shardClusterWorkspaceName, shard)
 		}
 
 		delete(c.shardClusterParentCluster[shard], logicalcluster.Name(ws.Spec.Cluster))
@@ -238,9 +238,9 @@ func (c *State) DeleteShard(shardName string) {
 			delete(c.clusterShards, lc)
 		}
 	}
-	delete(c.shardWorkspaceNameCluster, shardName)
+	delete(c.shardClusterWorkspaceNameCluster, shardName)
 	delete(c.shardBaseURLs, shardName)
-	delete(c.shardWorkspaceName, shardName)
+	delete(c.shardClusterWorkspaceName, shardName)
 	delete(c.shardClusterParentCluster, shardName)
 	delete(c.shardClusterWorkspaceNameErrorCode, shardName)
 }
@@ -285,19 +285,18 @@ func (c *State) Lookup(path logicalcluster.Path) (Result, bool) {
 			}
 		}
 
+		if ec, found := c.shardClusterWorkspaceNameErrorCode[shard][cluster][s]; found {
+			errorCode = ec
+		}
 		var found bool
-		cluster, found = c.shardWorkspaceNameCluster[shard][cluster][s]
+		cluster, found = c.shardClusterWorkspaceNameCluster[shard][cluster][s]
 		if !found {
 			return Result{}, false
 		}
 		shard, found = c.clusterShards[cluster]
 		if !found {
 			return Result{}, false
 		}
-		ec, found := c.shardClusterWorkspaceNameErrorCode[shard][cluster][s]
-		if found {
-			errorCode = ec
-		}
 	}
 	return Result{Shard: shard, Cluster: cluster, ErrorCode: errorCode}, true
 }
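The Lookup hunk above is the core of the fix: the error code is read while shard and cluster still identify the parent of the path segment s, before cluster is overwritten with the workspace's own cluster and shard is re-resolved from it. With the fixture used by the new test case below (workspace rh is an object in cluster one on shard beta and is scheduled to cluster two on shard gama), the difference in keys looks like this; a standalone illustration under the new storage convention, not code from the patch, and the logicalcluster import path is assumed:

package main

import (
	"fmt"

	"github.com/kcp-dev/logicalcluster/v3"
)

func main() {
	// What UpsertWorkspace now stores for the unavailable workspace "rh":
	// keyed by the parent shard "beta" and parent cluster "one".
	errorCodes := map[string]map[logicalcluster.Name]map[string]int{
		"beta": {logicalcluster.Name("one"): {"rh": 503}},
	}

	// Reading after the rewrite to the target ("gama"/"two"), as the old
	// Lookup ordering did, misses the entry and the 503 is lost.
	_, found := errorCodes["gama"][logicalcluster.Name("two")]["rh"]
	fmt.Println(found) // false

	// Reading before the rewrite, with the parent keys, finds it.
	ec, ok := errorCodes["beta"][logicalcluster.Name("one")]["rh"]
	fmt.Println(ec, ok) // 503 true
}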

pkg/index/index_test.go

Lines changed: 43 additions & 1 deletion
@@ -44,6 +44,7 @@ func TestLookup(t *testing.T) {
 		expectedCluster                logicalcluster.Name
 		expectFound                    bool
 		expectedURL                    string
+		expectedError                  int
 	}{
 		{
 			name: "an empty indexer is usable",
@@ -228,7 +229,7 @@ func TestLookup(t *testing.T) {
 			expectFound: false,
 		},
 		{
-			name: "multiple shards: a logical cluster for one:rh workspace is found",
+			name: "multiple shards: one:rh workspace is a mount with URL",
 			initialShardsToUpsert: []shardStub{
 				{
 					name: "root",
@@ -260,6 +261,39 @@ func TestLookup(t *testing.T) {
 			expectedShard:   "",
 			expectedURL:     "https://kcp.dev.local/services/custom-url/proxy",
 		},
+		{
+			name: "multiple shards: one:rh workspace is a mount, but phase is Unavailable",
+			initialShardsToUpsert: []shardStub{
+				{
+					name: "root",
+					url:  "https://root.kcp.dev",
+				},
+				{
+					name: "beta",
+					url:  "https://beta.kcp.dev",
+				},
+				{
+					name: "gama",
+					url:  "https://gama.kcp.dev",
+				},
+			},
+			initialWorkspacesToUpsert: map[string][]*tenancyv1alpha1.Workspace{
+				"root": {newWorkspace("org", "root", "one")},
+				"beta": {withPhase(newWorkspaceWithAnnotation("rh", "one", "two", map[string]string{
+					"experimental.tenancy.kcp.io/mount": `{"spec":{"ref":{"kind":"KubeCluster","name":"prod-cluster","apiVersion":"proxy.kcp.dev/v1alpha1"}},"status":{"phase":"Ready","url":"https://kcp.dev.local/services/custom-url/proxy"}}`,
+				}), corev1alpha1.LogicalClusterPhaseUnavailable)},
+			},
+			initialLogicalClustersToUpsert: map[string][]*corev1alpha1.LogicalCluster{
+				"root": {newLogicalCluster("root")},
+				"beta": {newLogicalCluster("one")},
+				"gama": {newLogicalCluster("two")},
+			},
+			targetPath:      logicalcluster.NewPath("one:rh"),
+			expectFound:     true,
+			expectedCluster: "",
+			expectedShard:   "",
+			expectedURL:     "https://kcp.dev.local/services/custom-url/proxy",
+		},
 	}
 
 	for _, scenario := range scenarios {
@@ -298,6 +332,9 @@ func TestLookup(t *testing.T) {
 			if r.URL != scenario.expectedURL {
 				t.Errorf("unexpected url = %v, for path = %v, expected = %v", r.URL, scenario.targetPath, scenario.expectedURL)
 			}
+			if r.ErrorCode != scenario.expectedError {
+				t.Errorf("unexpected error code = %v, for path = %v, expected = %v", r.ErrorCode, scenario.targetPath, scenario.expectedError)
+			}
 		})
 	}
 }
@@ -503,6 +540,11 @@ func newWorkspaceWithAnnotation(name, cluster, scheduledCluster string, annotati
 	return ws
 }
 
+func withPhase(ws *tenancyv1alpha1.Workspace, phase corev1alpha1.LogicalClusterPhaseType) *tenancyv1alpha1.Workspace {
+	ws.Status.Phase = phase
+	return ws
+}
+
 func newLogicalCluster(cluster string) *corev1alpha1.LogicalCluster {
 	return &corev1alpha1.LogicalCluster{
 		ObjectMeta: metav1.ObjectMeta{Name: "cluster", Annotations: map[string]string{"kcp.io/cluster": cluster}},
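The new expectedError field is now asserted for every scenario; the case added above keeps it at its zero value because a mounted workspace resolves to the mount URL. A scenario that exercises the stored 503 directly might look roughly like the sketch below; this case is not part of the commit, and the expected values are an assumption based on the Lookup changes in pkg/index/index.go:

{
	// Hypothetical scenario (not in this commit): an Unavailable workspace
	// without a mount annotation should surface the stored 503.
	name: "multiple shards: one:rh workspace is Unavailable, no mount",
	initialShardsToUpsert: []shardStub{
		{name: "root", url: "https://root.kcp.dev"},
		{name: "beta", url: "https://beta.kcp.dev"},
		{name: "gama", url: "https://gama.kcp.dev"},
	},
	initialWorkspacesToUpsert: map[string][]*tenancyv1alpha1.Workspace{
		"root": {newWorkspace("org", "root", "one")},
		"beta": {withPhase(newWorkspace("rh", "one", "two"), corev1alpha1.LogicalClusterPhaseUnavailable)},
	},
	initialLogicalClustersToUpsert: map[string][]*corev1alpha1.LogicalCluster{
		"root": {newLogicalCluster("root")},
		"beta": {newLogicalCluster("one")},
		"gama": {newLogicalCluster("two")},
	},
	targetPath:      logicalcluster.NewPath("one:rh"),
	expectFound:     true,
	expectedShard:   "gama",
	expectedCluster: "two",
	expectedError:   503,
},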

test/e2e/authorizer/serviceaccounts_test.go

Lines changed: 1 addition & 1 deletion
@@ -183,7 +183,7 @@ func TestServiceAccounts(t *testing.T) {
 			})
 
 			t.Run("Access another workspace in the same org", func(t *testing.T) {
-				t.Log("Create namespace with the same name ")
+				t.Log("Create workspace with the same name ")
 				otherPath, _ := framework.NewWorkspaceFixture(t, server, orgPath)
 				_, err := kubeClusterClient.Cluster(otherPath).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{

test/e2e/mounts/mounts_machinery_test.go

Lines changed: 26 additions & 27 deletions
@@ -62,8 +62,8 @@ func TestMountsMachinery(t *testing.T) {
 
 	orgPath, _ := framework.NewOrganizationFixture(t, server)
 
-	workspaceName := "mounts-machinery"
-	mountPath, _ := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithName("%s", workspaceName))
+	mountWorkspaceName := "mounts-machinery"
+	mountPath, _ := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithName("%s", mountWorkspaceName))
 
 	cfg := server.BaseConfig(t)
 	kcpClusterClient, err := kcpclientset.NewForConfig(cfg)
@@ -90,34 +90,33 @@ func TestMountsMachinery(t *testing.T) {
 		require.NoError(t, err)
 		return crdhelpers.IsCRDConditionTrue(crd, apiextensionsv1.Established), yamlMarshal(t, crd)
 	}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for CRD to be established")
-	require.NoError(t, err)
 
 	t.Logf("Install a mount object into workspace %q", orgPath)
 	framework.Eventually(t, func() (bool, string) {
-		mapper2 := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(orgProviderKCPClient.Cluster(orgPath).Discovery()))
-		err = helpers.CreateResourceFromFS(ctx, dynamicClusterClient.Cluster(orgPath), mapper2, nil, "kubecluster_mounts.yaml", testFiles)
+		mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(orgProviderKCPClient.Cluster(orgPath).Discovery()))
+		err = helpers.CreateResourceFromFS(ctx, dynamicClusterClient.Cluster(orgPath), mapper, nil, "kubecluster_mounts.yaml", testFiles)
 		return err == nil, fmt.Sprintf("%v", err)
 	}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for mount object to be installed")
 
 	// At this point we have object backing the mount object. So lets add mount annotation to the workspace.
-	t.Logf("Set a mount annotation into workspace %q", orgPath)
-	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, workspaceName, metav1.GetOptions{})
-		require.NoError(t, err)
-
-		mount := tenancyv1alpha1.Mount{
-			MountSpec: tenancyv1alpha1.MountSpec{
-				Reference: &tenancyv1alpha1.ObjectReference{
-					APIVersion: "contrib.kcp.io/v1alpha1",
-					Kind:       "KubeCluster",
-					Name:       "proxy-cluster", // must match name in kubecluster_mounts.yaml
-				},
+	// But order should not matter.
+	t.Logf("Set a mount annotation onto workspace %q", orgPath)
+	mount := tenancyv1alpha1.Mount{
+		MountSpec: tenancyv1alpha1.MountSpec{
+			Reference: &tenancyv1alpha1.ObjectReference{
+				APIVersion: "contrib.kcp.io/v1alpha1",
+				Kind:       "KubeCluster",
+				Name:       "proxy-cluster", // must match name in kubecluster_mounts.yaml
 			},
-		}
-		data, err := json.Marshal(mount)
+		},
+	}
+	annValue, err := json.Marshal(mount)
+	require.NoError(t, err)
+	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, mountWorkspaceName, metav1.GetOptions{})
 		require.NoError(t, err)
 
-		current.Annotations[tenancyv1alpha1.ExperimentalWorkspaceMountAnnotationKey] = string(data)
+		current.Annotations[tenancyv1alpha1.ExperimentalWorkspaceMountAnnotationKey] = string(annValue)
 
 		_, err = kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Update(ctx, current, metav1.UpdateOptions{})
 		return err
@@ -147,14 +146,14 @@ func TestMountsMachinery(t *testing.T) {
 			"phase": "Ready",
 		}
 
-		_, err = dynamicClusterClient.Cluster(orgPath).Resource(mountGVR).Namespace("").UpdateStatus(ctx, currentMount, metav1.UpdateOptions{})
+		_, err = dynamicClusterClient.Cluster(orgPath).Resource(mountGVR).UpdateStatus(ctx, currentMount, metav1.UpdateOptions{})
 		return err
 	})
 	require.NoError(t, err)
 
 	t.Log("Workspace should have WorkspaceMountReady and WorkspaceInitialized conditions, both true")
 	framework.Eventually(t, func() (bool, string) {
-		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, workspaceName, metav1.GetOptions{})
+		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, mountWorkspaceName, metav1.GetOptions{})
 		if err != nil {
 			return false, err.Error()
 		}
@@ -163,11 +162,12 @@ func TestMountsMachinery(t *testing.T) {
 		ready := conditions.IsTrue(current, tenancyv1alpha1.MountConditionReady)
 		return initialized && ready, yamlMarshal(t, current)
 	}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for WorkspaceMountReady and WorkspaceInitialized conditions to be true")
-	require.NoError(t, err)
 
 	t.Log("Workspace access should work")
-	_, err = kcpClusterClient.Cluster(mountPath).ApisV1alpha1().APIExports().List(ctx, metav1.ListOptions{})
-	require.NoError(t, err)
+	framework.Eventually(t, func() (bool, string) {
+		_, err := kcpClusterClient.Cluster(mountPath).ApisV1alpha1().APIExports().List(ctx, metav1.ListOptions{})
+		return err == nil, fmt.Sprintf("err = %v", err)
+	}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for workspace access to work")
 
 	t.Log("Set mount to not ready")
 	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
@@ -182,11 +182,10 @@ func TestMountsMachinery(t *testing.T) {
 
 	t.Log("Workspace phase should become unavailable")
 	framework.Eventually(t, func() (bool, string) {
-		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, workspaceName, metav1.GetOptions{})
+		current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, mountWorkspaceName, metav1.GetOptions{})
 		require.NoError(t, err)
 		return current.Status.Phase == corev1alpha1.LogicalClusterPhaseUnavailable, yamlMarshal(t, current)
 	}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for workspace to become unavailable")
-	require.NoError(t, err)
 
 	t.Logf("Workspace access should eventually fail")
 	framework.Eventually(t, func() (bool, string) {
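Two patterns recur in the e2e changes above: assertions that depend on asynchronous reconciliation are wrapped in framework.Eventually so they poll until the timeout, and the stray require.NoError(t, err) calls that directly followed Eventually blocks are dropped (they re-checked an err left over from an earlier step, and Eventually itself fails the test on timeout). A condensed sketch of the polling shape, using the same helpers the test already imports:

// Poll until the workspace reports phase Unavailable, failing the test with
// the last observed state if the timeout expires.
framework.Eventually(t, func() (bool, string) {
	current, err := kcpClusterClient.Cluster(orgPath).TenancyV1alpha1().Workspaces().Get(ctx, mountWorkspaceName, metav1.GetOptions{})
	if err != nil {
		return false, err.Error()
	}
	return current.Status.Phase == corev1alpha1.LogicalClusterPhaseUnavailable, yamlMarshal(t, current)
}, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for workspace to become unavailable")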
