-
Notifications
You must be signed in to change notification settings - Fork 217
feat: add support for identity service server and updating identity service #1385
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -31,6 +31,11 @@ import ( | |
"github.com/googleapis/gax-go/v2/apierror" | ||
"github.com/pkg/errors" | ||
"google.golang.org/grpc/codes" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/runtime" | ||
"k8s.io/apimachinery/pkg/runtime/schema" | ||
"k8s.io/client-go/dynamic" | ||
"k8s.io/client-go/tools/clientcmd" | ||
infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" | ||
"sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" | ||
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" | ||
|
@@ -157,7 +162,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { | |
conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1.ConditionSeverityInfo, "") | ||
|
||
// Reconcile kubeconfig | ||
err = s.reconcileKubeconfig(ctx, cluster, &log) | ||
kubeConfig, err := s.reconcileKubeconfig(ctx, cluster, &log) | ||
if err != nil { | ||
log.Error(err, "Failed to reconcile CAPI kubeconfig") | ||
return ctrl.Result{}, err | ||
|
@@ -168,6 +173,11 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { | |
return ctrl.Result{}, err | ||
} | ||
|
||
err = s.reconcileIdentityService(ctx, kubeConfig, &log) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Nit: I always recommend the one-line form when we don't need the error later:
(No need to fix, just my 2c) |
||
if err != nil { | ||
return ctrl.Result{}, err | ||
} | ||
|
||
s.scope.SetEndpoint(cluster.GetEndpoint()) | ||
conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition) | ||
conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition) | ||
|
@@ -481,6 +491,13 @@ func (s *Service) checkDiffAndPrepareUpdate(existingCluster *containerpb.Cluster | |
log.V(4).Info("Master authorized networks config update check", "desired", desiredMasterAuthorizedNetworksConfig) | ||
} | ||
|
||
desiredEnableIdentityService := s.scope.GCPManagedControlPlane.Spec.EnableIdentityService | ||
if desiredEnableIdentityService != existingCluster.GetIdentityServiceConfig().GetEnabled() { | ||
needUpdate = true | ||
clusterUpdate.DesiredIdentityServiceConfig = &containerpb.IdentityServiceConfig{Enabled: desiredEnableIdentityService} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. FYI I believe that some (OK, most) fields cannot be updated in "one shot". https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/ClusterUpdate says "Exactly one update can be applied to a cluster with each request, so at most one field can be provided." I think the easiest way to handle this is probably to build the UpdateClusterRequest as we are doing here, but then to break it down into one-field-at-a-time requests when we actually go to call UpdateCluster (I don't know whether we want to handle in this PR - or maybe it is handled somewhere else and I missed it - but it is a classic gotcha that I'm sure we'll hit!) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is an interesting limit I did not know about. We're actually updating the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I can update to:
That should be done everywhere after. does that look better? |
||
log.V(2).Info("Identity service config update required", "current", existingCluster.GetIdentityServiceConfig().GetEnabled(), "desired", desiredEnableIdentityService) | ||
} | ||
|
||
updateClusterRequest := containerpb.UpdateClusterRequest{ | ||
Name: s.scope.ClusterFullName(), | ||
Update: &clusterUpdate, | ||
|
@@ -516,3 +533,78 @@ func compareMasterAuthorizedNetworksConfig(a, b *containerpb.MasterAuthorizedNet | |
} | ||
return true | ||
} | ||
|
||
// reconcileIdentityService set the identity service server in the status of the GCPManagedControlPlane. | ||
func (s *Service) reconcileIdentityService(ctx context.Context, kubeConfig clientcmd.ClientConfig, log *logr.Logger) error { | ||
identityServiceServer, err := s.getIdentityServiceServer(ctx, kubeConfig) | ||
if err != nil { | ||
err = fmt.Errorf("failed to retrieve identity service: %w", err) | ||
log.Error(err, "Failed to retrieve identity service server") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Nit: I personally think we shouldn't do this, we should rely on the caller logging, but I'm guessing this happens more often than we would like (Another thought, likely not for this PR - we should decide whether we should pass the logr in, vs getting it from the ctx) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I second this, it is cleaner to have the caller logging the error and the called method only return the error here. I also agree on the logger being passed as an argument, which we're doing already, and I think is overly convoluted, but I suggest we discuss this in a separate issue and open a stand-alone PR to tidy it up. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I follow the current patterns, I am happy to change. I am not 100% sure what we want to remove.
|
||
return err | ||
} | ||
|
||
s.scope.GCPManagedControlPlane.Status.IdentityServiceServer = identityServiceServer | ||
|
||
return nil | ||
} | ||
|
||
// getIdentityServiceServer retrieve the server to use for authentication using the identity service. | ||
func (s *Service) getIdentityServiceServer(ctx context.Context, kubeConfig clientcmd.ClientConfig) (string, error) { | ||
/* | ||
# Example of the ClientConfig (see https://cloud.google.com/kubernetes-engine/docs/how-to/oidc#configuring_on_a_cluster): | ||
apiVersion: authentication.gke.io/v2alpha1 | ||
kind: ClientConfig | ||
metadata: | ||
name: default | ||
namespace: kube-public | ||
spec: | ||
server: https://192.168.0.1:6443 | ||
*/ | ||
|
||
if !s.scope.GCPManagedControlPlane.Spec.EnableIdentityService { | ||
// Identity service is not enabled, skipping | ||
return "", nil | ||
salasberryfin marked this conversation as resolved.
Show resolved
Hide resolved
|
||
} | ||
|
||
if kubeConfig == nil { | ||
return "", errors.New("provided kubernetes configuration is nil") | ||
} | ||
|
||
config, err := kubeConfig.ClientConfig() | ||
if err != nil { | ||
return "", fmt.Errorf("failed to get client config: %w", err) | ||
} | ||
|
||
dynamicClient, err := dynamic.NewForConfig(config) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to create dynamic client: %w", err) | ||
} | ||
|
||
resourceID := schema.GroupVersionResource{ | ||
Group: "authentication.gke.io", | ||
Version: "v2alpha1", | ||
salasberryfin marked this conversation as resolved.
Show resolved
Hide resolved
|
||
Resource: "clientconfigs", | ||
} | ||
|
||
unstructured, err := dynamicClient.Resource(resourceID).Namespace("kube-public").Get(ctx, "default", metav1.GetOptions{}) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to get identity service client config: %w", err) | ||
} | ||
|
||
gkeClientConfig := struct { | ||
Spec struct { | ||
Server string `json:"server"` | ||
} `json:"spec"` | ||
}{} | ||
|
||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructured.Object, &gkeClientConfig) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to convert unstructured to client config: %w", err) | ||
} | ||
|
||
if gkeClientConfig.Spec.Server == "" { | ||
return "", errors.New("identity service server URL is empty") | ||
} | ||
|
||
return gkeClientConfig.Spec.Server, nil | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Note: this also fixes the error-variable naming — it should have been `updateErr`; now everything is `err`.