** DO NOT MERGE ** Testing consul-1.21.0-rc1 + consul-k8s-1.7.0-rc1 with ocp-4.16 + k8s-1.30.0 + kubectl 1.30.0 #4506

Open · wants to merge 1 commit into base: release/1.7.0-rc1
25 changes: 25 additions & 0 deletions .github/workflows/pr-cloud-accepance.yml
@@ -0,0 +1,25 @@
# Dispatch to the consul-k8s-workflows on pull requests
name: pr-cloud-acceptance
on:
pull_request:
branches:
- release/1.7.0-rc1

# these should be the only settings that you will ever need to change
env:
BRANCH: ${{ github.event.pull_request.head.ref }}
CONTEXT: "pr"

jobs:
cloud-acceptance:
name: cloud-acceptance
runs-on: ubuntu-latest
steps:
- uses: benc-uk/workflow-dispatch@25b02cc069be46d637e8fe2f1e8484008e9e9609 # v1.2.3
name: cloud
with:
workflow: cloud.yml
repo: hashicorp/consul-k8s-workflows
ref: mukul/testing-ocp-compatibility
token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
inputs: '{ "context":"${{ env.CONTEXT }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ github.sha }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}", "test-integrations": "eks" }'
4 changes: 3 additions & 1 deletion .github/workflows/pr.yml
@@ -2,6 +2,8 @@
name: pr
on:
pull_request:
branches:
- release/1.7.0-rc1

# these should be the only settings that you will ever need to change
env:
@@ -24,7 +26,7 @@ jobs:
with:
workflow: test.yml
repo: hashicorp/consul-k8s-workflows
ref: main
ref: mukul/testing-ocp-compatibility
token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
inputs: '{ "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }'

2 changes: 1 addition & 1 deletion .github/workflows/weekly-acceptance-1-7-x.yml
@@ -6,7 +6,7 @@ on:
schedule:
# * is a special character in YAML so you have to quote this string
# Run weekly on Sunday at 3AM UTC/11PM EST/8PM PST
- cron: '0 3 * * 7'
- cron: '0 3 * * 0'

# these should be the only settings that you will ever need to change
env:
5 changes: 3 additions & 2 deletions acceptance/ci-inputs/kind-inputs.yaml
@@ -2,5 +2,6 @@
# SPDX-License-Identifier: MPL-2.0

kindVersion: v0.23.0
kindNodeImage: kindest/node:v1.30.2@sha256:ecfe5841b9bee4fe9690f49c118c33629fa345e3350a0c67a5a34482a99d6bba
kubectlVersion: v1.30.2
# kindest/node image for Kubernetes v1.30.0
kindNodeImage: kindest/node:v1.30.0
kubectlVersion: v1.30.0
138 changes: 136 additions & 2 deletions acceptance/tests/openshift/openshift_test_runner.go
@@ -5,15 +5,148 @@ import (
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"testing"
)

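// interruptProcess sends SIGINT to the current test process so the run aborts
// early instead of hanging when namespace cleanup cannot make progress.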
func interruptProcess(t *testing.T) {
p, err := os.FindProcess(os.Getpid())
if err != nil {
t.Logf("Failed to find process: %v", err)
return
}
err = p.Signal(syscall.SIGINT)
if err != nil {
t.Logf("Failed to send interrupt signal: %v", err)
}
}
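
// removeNamespaceFinalizer force-removes the finalizer list from a namespace
// stuck in Terminating, then verifies that the deletion completed.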
func removeNamespaceFinalizer(t *testing.T, namespace string) {
cmd := exec.Command("kubectl", "patch", "namespace", namespace,
"--type=json", "-p", `[{"op": "remove", "path": "/spec/finalizers"}]`)

if output, err := cmd.CombinedOutput(); err != nil {
t.Logf("Error removing namespace finalizer: %v\nOutput: %s\n", err, output)
}
verifyNamespaceDeletion(t, namespace)
}

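// verifyNamespaceDeletion checks that the namespace is gone; if it still
// exists, it interrupts the test process rather than letting the run hang.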
func verifyNamespaceDeletion(t *testing.T, namespace string) {
checkCmd := exec.Command("kubectl", "get", "ns", namespace)
if checkErr := checkCmd.Run(); checkErr != nil {
t.Logf("Namespace %s deleted successfully\n", namespace)
return
}

t.Logf("Namespace still exists. Additional cleanup might be required")
interruptProcess(t)
}

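// gatewayControllerDiagnostic dumps namespace and pod listings to the test log
// to help identify the gateway controller pods before cleanup.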
func gatewayControllerDiagnostic(t *testing.T, namespace string) {
// Add diagnostic logging for pods for controller identification
lsNamespaceCmd := exec.Command("oc", "get", "namespaces", "-o", "wide")
lsNamespaceOutput, _ := lsNamespaceCmd.CombinedOutput()
t.Logf("Namespaces in cluster:\n%s", string(lsNamespaceOutput))

podInfoCmd := exec.Command("kubectl", "get", "pods", "-n", namespace)
podInfoOutput, _ := podInfoCmd.CombinedOutput()
t.Logf("Pod status in namespace %s before cleanup:\n%s", namespace, string(podInfoOutput))

podInfoCmd = exec.Command("kubectl", "get", "pods", "-n", "kube-system")
podInfoOutput, _ = podInfoCmd.CombinedOutput()
t.Logf("Pod status in namespace %s before cleanup:\n%s", "kube-system", string(podInfoOutput))
}

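// checkAndDeleteNamespace force-cleans a leftover "consul" namespace: it logs
// diagnostics, strips gateway and namespace finalizers, deletes the namespace,
// and waits for the deletion to complete.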
func checkAndDeleteNamespace(t *testing.T) {
// Several commands in this function hardcode the "consul" namespace rather
// than using this variable, so renaming it requires manual updates below.
namespace := "consul"
// Check if namespace exists and its status
nsCheckCmd := exec.Command("kubectl", "get", "namespace", namespace, "-o", "json")
nsOutput, _ := nsCheckCmd.CombinedOutput()
t.Logf("Consul namespace status before cleanup:\n%s", string(nsOutput))

// Add diagnostic logging before attempting cleanup
logCmd := exec.Command("kubectl", "get", "all", "-n", namespace)
logOutput, _ := logCmd.CombinedOutput()
t.Logf("Resources in consul namespace before cleanup:\n%s", string(logOutput))

// find gateway controller information and logs
gatewayControllerDiagnostic(t, namespace)
// Force cleanup of any stuck resources in the namespace (if it still exists)
t.Log("Checking for any stuck resources...")
forceCleanupCmd := exec.Command("bash", "-c", `
# Try to find finalizers on the namespace
FINALIZERS=$(kubectl get namespace consul -o json 2>/dev/null | jq '.spec.finalizers' 2>/dev/null)
if [ ! -z "$FINALIZERS" ] && [ "$FINALIZERS" != "null" ]; then
echo "Found finalizers on namespace consul"
echo $FINALIZERS
# Remove finalizers from namespace to force deletion
# kubectl get namespace consul -o json | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/consul/finalize" -f -
fi
if kubectl get namespace consul > /dev/null 2>&1; then
# Check for gateway resources
GATEWAYS=$(kubectl get gateways.gateway.networking.k8s.io -n consul -o json 2>/dev/null || echo "")
echo $GATEWAYS
fi
`)
forceOutput, _ := forceCleanupCmd.CombinedOutput()
t.Logf("Force cleanup result:\n%s", string(forceOutput))

// Get remaining Gateways
getCmd := exec.Command("kubectl", "get", "gateways.gateway.networking.k8s.io", "-n", namespace,
"-o=jsonpath={.items[*].metadata.name}")
output, err := getCmd.CombinedOutput()
t.Logf("Gateway resource check result:\n%s", string(output))

if err != nil {
t.Logf("Error getting gateways: %v\n", err)
return
}
cleanedOutput := strings.TrimSpace(string(output))
if cleanedOutput == "" {
t.Logf("No gateways found, removing namespace finalizer")
removeNamespaceFinalizer(t, namespace)
return
}
// Remove finalizers from each gateway; the jsonpath output is a
// space-separated list of gateway names, so patch them one at a time.
for _, gw := range strings.Fields(cleanedOutput) {
patchCmd := exec.Command("kubectl", "patch", "gateways.gateway.networking.k8s.io", gw, "-n", namespace,
"--type=json", "-p", `[{"op": "remove", "path": "/metadata/finalizers"}]`)
patchOutput, patchErr := patchCmd.CombinedOutput()
if patchErr != nil {
t.Logf("Error patching gateway %s: %v\nOutput: %s\n", gw, patchErr, patchOutput)
return
}
}
t.Logf("Finalizers removed successfully")
removeNamespaceFinalizer(t, namespace)
t.Log("Attempting to delete consul namespace if it exists...")
cleanupCmd := exec.Command("kubectl", "delete", "namespace", "consul", "--ignore-not-found=true")
cleanupOutput, cleanupErr := cleanupCmd.CombinedOutput()
// We don't check error here since it's just precautionary cleanup
t.Logf("Namespace deletion attempt result: %v\nOutput: %s", cleanupErr, string(cleanupOutput))

// Wait for namespace to be fully deleted before proceeding
t.Log("Waiting for consul namespace to be fully deleted...")
waitCmd := exec.Command("kubectl", "wait", "--for=delete", "namespace/consul", "--timeout=30s")
waitOutput, waitErr := waitCmd.CombinedOutput() // Ignore errors, as this will error if the namespace doesn't exist at all
t.Logf("Wait result: %v\nOutput: %s", waitErr, string(waitOutput))

// Verify namespace deletion
verifyNamespaceDeletion(t, namespace)
}

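// newOpenshiftCluster installs Consul on an OpenShift cluster via helm,
// first force-cleaning any consul namespace left over from a previous run.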
func newOpenshiftCluster(t *testing.T, cfg *config.TestConfig, secure, namespaceMirroring bool) {
cmd := exec.Command("helm", "repo", "add", "hashicorp", "https://helm.releases.hashicorp.com")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output))

// Check for any stuck resources in the namespace and force cleanup if necessary
checkAndDeleteNamespace(t)

// FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which
// isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s
@@ -26,7 +159,7 @@ func newOpenshiftCluster(t *testing.T, cfg *config.TestConfig, secure, namespace
assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output))
})

require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output))
require.NoErrorf(t, err, "failed to create namespace: %s", string(output))

cmd = exec.Command("kubectl", "create", "secret", "generic",
"consul-ent-license",
@@ -62,6 +195,7 @@ func newOpenshiftCluster(t *testing.T, cfg *config.TestConfig, secure, namespace
)

output, err = cmd.CombinedOutput()
t.Logf("Output of the helm install command: %s", string(output))
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul")
output, err := cmd.CombinedOutput()
7 changes: 6 additions & 1 deletion charts/consul/test/terraform/aks/main.tf
@@ -4,13 +4,18 @@
terraform {
required_providers {
azurerm = {
version = "3.40.0"
version = "~> 3.95.0"
}
random = {
source = "hashicorp/random"
version = "~> 3.5.0"
}
}
}

provider "azurerm" {
features {}
skip_provider_registration = true
}

provider "local" {}
2 changes: 1 addition & 1 deletion charts/consul/test/terraform/aks/variables.tf
@@ -7,7 +7,7 @@ variable "location" {
}

variable "kubernetes_version" {
default = "1.28"
default = "1.29"
description = "Kubernetes version supported on AKS"
}

2 changes: 1 addition & 1 deletion charts/consul/test/terraform/eks/main.tf
@@ -68,7 +68,7 @@ module "eks" {
kubeconfig_api_version = "client.authentication.k8s.io/v1beta1"

cluster_name = "consul-k8s-${random_id.suffix[count.index].dec}"
cluster_version = "1.26"
cluster_version = var.kubernetes_version
subnets = module.vpc[count.index].private_subnets
enable_irsa = true

5 changes: 5 additions & 0 deletions charts/consul/test/terraform/eks/variables.tf
@@ -28,3 +28,8 @@ variable "tags" {
default = {}
description = "Tags to attach to the created resources."
}

variable "kubernetes_version" {
default = "1.30"
description = "Kubernetes version supported on EKS"
}
2 changes: 1 addition & 1 deletion charts/consul/test/terraform/gke/main.tf
@@ -21,7 +21,7 @@ resource "random_id" "suffix" {

data "google_container_engine_versions" "main" {
location = var.zone
version_prefix = "1.27."
version_prefix = var.kubernetes_version_prefix
}

# We assume that the subnets are already created to save time.
5 changes: 5 additions & 0 deletions charts/consul/test/terraform/gke/variables.tf
@@ -43,3 +43,8 @@ variable "subnet" {
default = "default"
description = "Subnet to create the cluster in. Currently all clusters use the default subnet and we are running out of IPs"
}

variable "kubernetes_version_prefix" {
default = "1.30."
description = "Kubernetes version supported on EKS"
}