fix: prevent clusters in deletion from being considered by the scheduler #45

Merged 1 commit on Jun 5, 2025

9 changes: 9 additions & 0 deletions internal/controllers/scheduler/controller.go
@@ -170,8 +170,17 @@ func (r *ClusterScheduler) handleCreateOrUpdate(ctx context.Context, req reconci
		return rr
	}

	// if no cluster was found, check if there is an existing cluster that qualifies for the request
	if cDef.Template.Spec.Tenancy == clustersv1alpha1.TENANCY_SHARED {
		log.Debug("Cluster template allows sharing, checking for fitting clusters", "purpose", purpose, "tenancyCount", cDef.TenancyCount)
		// remove all clusters with a non-zero deletion timestamp from the list of candidates
		clusters = filters.FilterSlice(clusters, func(args ...any) bool {
			c, ok := args[0].(*clustersv1alpha1.Cluster)
			if !ok {
				return false
			}
			return c.DeletionTimestamp.IsZero()
		})
		// unless the cluster template for the requested purpose allows unlimited sharing, filter out all clusters that are already at their tenancy limit
		if cDef.TenancyCount > 0 {
			clusters = filters.FilterSlice(clusters, func(args ...any) bool {
…
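
The fix leans on the repository's filters.FilterSlice helper, calling it with a predicate that keeps only clusters whose DeletionTimestamp is unset. For readers unfamiliar with the helper, here is a minimal sketch of a generic slice filter matching the call shape above; the signature is inferred from the call sites, and the real implementation in the filters package may differ:

// Sketch only: a generic slice filter matching the call sites above;
// the actual helper in the filters package may be implemented differently.
package filters

// FilterSlice returns the elements of s for which pred returns true.
// The variadic `any` predicate matches how the scheduler passes each
// cluster as args[0] and type-asserts it back to *clustersv1alpha1.Cluster.
func FilterSlice[T any](s []T, pred func(args ...any) bool) []T {
	out := make([]T, 0, len(s))
	for _, elem := range s {
		if pred(elem) {
			out = append(out, elem)
		}
	}
	return out
}

With this shape, the new filter drops every cluster that is already in deletion before the tenancy check runs, so a terminating cluster can no longer be handed out to new requests.
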
38 changes: 38 additions & 0 deletions internal/controllers/scheduler/controller_test.go
@@ -446,4 +446,42 @@ var _ = Describe("Scheduler", func() {
		Expect(cluster.DeletionTimestamp).To(BeZero(), "Cluster should not be marked for deletion")
	})

	It("should not consider clusters that are in deletion for scheduling", func() {
		// verify that the cluster is usually considered for scheduling
		_, env := defaultTestSetup("testdata", "test-01")

		c := &clustersv1alpha1.Cluster{}
		c.SetName("shared-1")
		c.SetNamespace("shared-twice")
		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())

		cr := &clustersv1alpha1.ClusterRequest{}
		cr.SetName("shared")
		cr.SetNamespace("foo")
		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
		env.ShouldReconcile(testutils.RequestFromObject(cr))
		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
		Expect(cr.Status.Cluster).ToNot(BeNil())
		Expect(cr.Status.Cluster.Name).To(Equal(c.Name))
		Expect(cr.Status.Cluster.Namespace).To(Equal(c.Namespace))

		// repeat, but with the cluster in deletion
		_, env = defaultTestSetup("testdata", "test-01")

		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())
		c.Finalizers = []string{"foo"}
		Expect(env.Client().Update(env.Ctx, c)).To(Succeed())
		Expect(env.Client().Delete(env.Ctx, c)).To(Succeed())
		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed())
		Expect(c.DeletionTimestamp).ToNot(BeZero(), "Cluster should be marked for deletion")

		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
		env.ShouldReconcile(testutils.RequestFromObject(cr))
		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
		Expect(cr.Status.Cluster).ToNot(BeNil())
		Expect(cr.Status.Cluster.Name).ToNot(Equal(c.Name), "Cluster is in deletion and should not be considered for scheduling")
		Expect(cr.Status.Cluster.Namespace).To(Equal(c.Namespace))
	})

})
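
The test drives the cluster into a terminating-but-present state with a standard Kubernetes trick: an object that carries a finalizer is not removed by Delete; the API server (and recent controller-runtime fake clients) only sets its DeletionTimestamp until the finalizer is cleared. Here is a self-contained sketch of the pattern using controller-runtime's fake client; the object and finalizer names are illustrative, not taken from the PR:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:       "demo",
		Namespace:  "default",
		Finalizers: []string{"example.com/block-deletion"}, // blocks actual removal
	}}
	c := fake.NewClientBuilder().WithObjects(cm).Build()
	ctx := context.Background()

	// With a finalizer present, Delete does not remove the object;
	// it only stamps the DeletionTimestamp.
	_ = c.Delete(ctx, cm)

	got := &corev1.ConfigMap{}
	_ = c.Get(ctx, client.ObjectKeyFromObject(cm), got)
	fmt.Println("terminating:", !got.DeletionTimestamp.IsZero()) // prints: terminating: true
}

In the test above, the same trick keeps shared-1 alive in a terminating state, which is exactly the situation the scheduler must now skip: the second reconcile has to assign the request to a different cluster.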