diff --git a/internal/controllers/scheduler/controller.go b/internal/controllers/scheduler/controller.go
index 8fe108c..2cb4a1a 100644
--- a/internal/controllers/scheduler/controller.go
+++ b/internal/controllers/scheduler/controller.go
@@ -170,8 +170,17 @@ func (r *ClusterScheduler) handleCreateOrUpdate(ctx context.Context, req reconci
 		return rr
 	}
 
+	// if no cluster was found, check if there is an existing cluster that qualifies for the request
 	if cDef.Template.Spec.Tenancy == clustersv1alpha1.TENANCY_SHARED {
 		log.Debug("Cluster template allows sharing, checking for fitting clusters", "purpose", purpose, "tenancyCount", cDef.TenancyCount)
+		// remove all clusters with a non-zero deletion timestamp from the list of candidates
+		clusters = filters.FilterSlice(clusters, func(args ...any) bool {
+			c, ok := args[0].(*clustersv1alpha1.Cluster)
+			if !ok {
+				return false
+			}
+			return c.DeletionTimestamp.IsZero()
+		})
 		// unless the cluster template for the requested purpose allows unlimited sharing, filter out all clusters that are already at their tenancy limit
 		if cDef.TenancyCount > 0 {
 			clusters = filters.FilterSlice(clusters, func(args ...any) bool {
diff --git a/internal/controllers/scheduler/controller_test.go b/internal/controllers/scheduler/controller_test.go
index 6a773e8..cb66866 100644
--- a/internal/controllers/scheduler/controller_test.go
+++ b/internal/controllers/scheduler/controller_test.go
@@ -446,4 +446,42 @@ var _ = Describe("Scheduler", func() {
 		Expect(cluster.DeletionTimestamp).To(BeZero(), "Cluster should not be marked for deletion")
 	})
 
+	It("should not consider clusters that are in deletion for scheduling", func() {
+		// verify that the cluster is usually considered for scheduling
+		_, env := defaultTestSetup("testdata", "test-01")
+
+		c := &clustersv1alpha1.Cluster{}
+		c.SetName("shared-1")
+		c.SetNamespace("shared-twice")
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())
+
+		cr := &clustersv1alpha1.ClusterRequest{}
+		cr.SetName("shared")
+		cr.SetNamespace("foo")
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		env.ShouldReconcile(testutils.RequestFromObject(cr))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		Expect(cr.Status.Cluster).ToNot(BeNil())
+		Expect(cr.Status.Cluster.Name).To(Equal(c.Name))
+		Expect(cr.Status.Cluster.Namespace).To(Equal(c.Namespace))
+
+		// repeat, but with the cluster in deletion
+		_, env = defaultTestSetup("testdata", "test-01")
+
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())
+		c.Finalizers = []string{"foo"}
+		Expect(env.Client().Update(env.Ctx, c)).To(Succeed())
+		Expect(env.Client().Delete(env.Ctx, c)).To(Succeed())
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())
+		Expect(c.DeletionTimestamp).ToNot(BeZero(), "Cluster should be marked for deletion")
+
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		env.ShouldReconcile(testutils.RequestFromObject(cr))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		Expect(cr.Status.Cluster).ToNot(BeNil())
+		Expect(cr.Status.Cluster.Name).ToNot(Equal(c.Name), "Cluster is in deletion and should not be considered for scheduling")
+		Expect(cr.Status.Cluster.Namespace).To(Equal(c.Namespace))
+
+	})
+
 })
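
For context on the helper both hunks rely on: the call sites imply that filters.FilterSlice takes a slice and a variadic-args predicate, passing each element as args[0]. A minimal sketch of that shape, assuming a generic implementation (the actual helper in this repository may differ):

// Minimal sketch of a filters.FilterSlice-style helper, assuming the
// variadic-args predicate implied by the call sites above; not the
// repository's actual implementation.
package filters

// FilterSlice returns a new slice containing only the elements of s
// for which keep returns true. Each element is passed as args[0].
func FilterSlice[T any](s []T, keep func(args ...any) bool) []T {
	out := make([]T, 0, len(s))
	for _, elem := range s {
		if keep(elem) {
			out = append(out, elem)
		}
	}
	return out
}

Under this shape, elements reach the predicate as any, which is why the predicates above re-assert the concrete type with args[0].(*clustersv1alpha1.Cluster) and return false when the assertion fails, silently dropping unexpected entries rather than panicking.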