Openshift tenancy test #4467

Open · wants to merge 2 commits into base: main
24 changes: 24 additions & 0 deletions .github/workflows/pr_openshift.yml
@@ -0,0 +1,24 @@
# Dispatch to the consul-k8s-workflows cloud workflow on pull requests
name: pr-openshift-acceptance
on:
pull_request:


# these should be the only settings that you will ever need to change
env:
BRANCH: ${{ github.event.pull_request.head.ref }}
CONTEXT: "pr"

jobs:
openshift-acceptance:
name: openshift-acceptance
runs-on: ubuntu-latest
steps:
- uses: benc-uk/workflow-dispatch@25b02cc069be46d637e8fe2f1e8484008e9e9609 # v1.2.3
name: cloud
with:
workflow: cloud.yml
repo: hashicorp/consul-k8s-workflows
ref: use-aws-cluster
token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
inputs: '{ "context":"${{ env.CONTEXT }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ github.sha }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }'
23 changes: 19 additions & 4 deletions acceptance/framework/consul/helm_cluster.go
@@ -156,7 +156,7 @@
chartName = h.ChartPath
}
// Retry the install in case previous tests have not finished cleaning up.
retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 30}, t, func(r *retry.R) {
retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 1}, t, func(r *retry.R) {
err := helm.InstallE(r, h.helmOptions, chartName, h.releaseName)
require.NoError(r, err)
})
@@ -481,9 +481,17 @@
releaseName = release[0]
}
serverPod := fmt.Sprintf("%s-consul-server-0", releaseName)
if releaseName == "" {
serverPod = fmt.Sprintf("consul-server-0")

Check failure on line 485 in acceptance/framework/consul/helm_cluster.go (GitHub Actions / golangci-lint): S1039: unnecessary use of fmt.Sprintf (gosimple)
}
return portforward.CreateTunnelToResourcePort(t, serverPod, remotePort, h.helmOptions.KubectlOptions, h.logger)
}

// for instances when namespace is being manually set by the test and needs to be overridden

Check failure on line 490 in acceptance/framework/consul/helm_cluster.go (GitHub Actions / golangci-lint): Comment should end in a period (godot)
func (h *HelmCluster) SetNamespace(ns string) {
h.helmOptions.KubectlOptions.Namespace = ns
}

func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...string) (client *api.Client, configAddress string) {
t.Helper()

@@ -514,10 +522,17 @@
// and will try to read the replication token from the federation secret.
// In secondary servers, we don't create a bootstrap token since ACLs are only bootstrapped in the primary.
// Instead, we provide a replication token that serves the role of the bootstrap token.
aclSecret, err := h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), releaseName+"-consul-bootstrap-acl-token", metav1.GetOptions{})
aclSecretName := releaseName + "-consul-bootstrap-acl-token"
if releaseName == "" {
aclSecretName = "consul-bootstrap-acl-token"
}
aclSecret, err := h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), aclSecretName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
federationSecret := fmt.Sprintf("%s-consul-federation", releaseName)
aclSecret, err = h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), federationSecret, metav1.GetOptions{})
federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName)
if releaseName == "" {
federationSecretName = "consul-federation"
}
aclSecret, err = h.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{})
require.NoError(r, err)
config.Token = string(aclSecret.Data["replicationToken"])
} else if err == nil {
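
For context, a short sketch of how a test could combine the new SetNamespace helper with the empty-release-name fallbacks added above. Everything here except SetNamespace and SetupConsulClient is hypothetical: the helper name, the way the HelmCluster value is obtained, and the assumption that the chart was installed by hand into the "consul" namespace are illustrative only.

package openshift

import (
	"testing"

	"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
	"github.com/hashicorp/consul/api"
)

// connectToManuallyInstalledConsul is a hypothetical helper: it assumes the chart
// was installed outside the framework (for example by shelling out to helm), so
// there is no release name and the framework must be pointed at the right namespace.
func connectToManuallyInstalledConsul(t *testing.T, cluster *consul.HelmCluster) *api.Client {
	// Override the namespace the framework would otherwise use.
	cluster.SetNamespace("consul")

	// With an empty release name, SetupConsulClient falls back to the
	// "consul-bootstrap-acl-token" and "consul-federation" secret names instead
	// of "<release>-consul-bootstrap-acl-token".
	client, _ := cluster.SetupConsulClient(t, true)
	return client
}
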
61 changes: 3 additions & 58 deletions acceptance/tests/openshift/basic_openshift_test.go
@@ -27,69 +27,14 @@ import (
// Test that api gateway basic functionality works in a default installation and a secure installation.
func TestOpenshift_Basic(t *testing.T) {
cfg := suite.Config()

cmd := exec.Command("helm", "repo", "add", "hashicorp", "https://helm.releases.hashicorp.com")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output))

// FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which
// isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s
// directly to bypass. Ideally we would just fix the framework that is running the pod as root.
cmd = exec.Command("kubectl", "create", "namespace", "consul")
output, err = cmd.CombinedOutput()
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd = exec.Command("kubectl", "delete", "namespace", "consul")
output, err = cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output))
})

require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output))

cmd = exec.Command("kubectl", "create", "secret", "generic",
"consul-ent-license",
"--namespace", "consul",
`--from-literal=key=`+cfg.EnterpriseLicense)
output, err = cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to add consul enterprise license: %s", string(output))

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd = exec.Command("kubectl", "delete", "secret", "consul-ent-license")
output, err = cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete secret: %s", string(output))
})

chartPath := "../../../charts/consul"
cmd = exec.Command("helm", "upgrade", "--install", "consul", chartPath,
"--namespace", "consul",
"--set", "global.name=consul",
"--set", "connectInject.enabled=true",
"--set", "connectInject.transparentProxy.defaultEnabled=false",
"--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000",
"--set", "global.acls.manageSystemACLs=true",
"--set", "global.tls.enabled=true",
"--set", "global.tls.enableAutoEncrypt=true",
"--set", "global.openshift.enabled=true",
"--set", "global.image="+cfg.ConsulImage,
"--set", "global.imageK8S="+cfg.ConsulK8SImage,
"--set", "global.imageConsulDataplane="+cfg.ConsulDataplaneImage,
"--set", "global.enterpriseLicense.secretName=consul-ent-license",
"--set", "global.enterpriseLicense.secretKey=key",
)
output, err = cmd.CombinedOutput()
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to uninstall consul: %s", string(output))
})

require.NoErrorf(t, err, "failed to install consul: %s", string(output))
newOpenshiftCluster(t, cfg, true, false)

// this is normally called by the environment, but because we have to bypass we have to call it explicitly
logf.SetLogger(logr.New(nil))
logger.Log(t, "creating resources for OpenShift test")

cmd = exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic")
output, err = cmd.CombinedOutput()
cmd := exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic")
output, err := cmd.CombinedOutput()
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("kubectl", "delete", "-f", "../fixtures/cases/openshift/basic")
output, err := cmd.CombinedOutput()
72 changes: 72 additions & 0 deletions acceptance/tests/openshift/openshift_test_runner.go
@@ -0,0 +1,72 @@
package openshift

import (
"github.com/hashicorp/consul-k8s/acceptance/framework/config"
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"os/exec"
"strconv"
"testing"
)

// newOpenshiftCluster installs the Consul Helm chart into a "consul" namespace by
// shelling out to helm and kubectl directly, because the framework's NewHelmCluster
// runs the server pod as root, which OpenShift does not allow.
func newOpenshiftCluster(t *testing.T, cfg *config.TestConfig, secure, namespaceMirroring bool) {
cmd := exec.Command("helm", "repo", "add", "hashicorp", "https://helm.releases.hashicorp.com")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to add hashicorp helm repo: %s", string(output))

// FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which
// isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s
// directly to bypass. Ideally we would just fix the framework that is running the pod as root.
cmd = exec.Command("kubectl", "create", "namespace", "consul")
output, err = cmd.CombinedOutput()
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd = exec.Command("kubectl", "delete", "namespace", "consul")
output, err = cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output))
})

require.NoErrorf(t, err, "failed to create namespace: %s", string(output))

cmd = exec.Command("kubectl", "create", "secret", "generic",
"consul-ent-license",
"--namespace", "consul",
`--from-literal=key=`+cfg.EnterpriseLicense)
output, err = cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to add consul enterprise license: %s", string(output))

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd = exec.Command("kubectl", "delete", "secret", "consul-ent-license", "--namespace", "consul")
output, err = cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete secret: %s", string(output))
})

chartPath := "../../../charts/consul"
cmd = exec.Command("helm", "upgrade", "--install", "consul", chartPath,
"--namespace", "consul",
"--set", "global.name=consul",
"--set", "connectInject.enabled=true",
"--set", "connectInject.transparentProxy.defaultEnabled=false",
"--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000",
"--set", "global.acls.manageSystemACLs="+strconv.FormatBool(secure),
"--set", "global.tls.enabled="+strconv.FormatBool(secure),
"--set", "global.tls.enableAutoEncrypt="+strconv.FormatBool(secure),
"--set", "global.enableConsulNamespaces="+strconv.FormatBool(namespaceMirroring),
"--set", "global.consulNamespaces.mirroringK8S="+strconv.FormatBool(namespaceMirroring),
"--set", "global.openshift.enabled=true",
"--set", "global.image="+cfg.ConsulImage,
"--set", "global.imageK8S="+cfg.ConsulK8SImage,
"--set", "global.imageConsulDataplane="+cfg.ConsulDataplaneImage,
"--set", "global.enterpriseLicense.secretName=consul-ent-license",
"--set", "global.enterpriseLicense.secretKey=key",
)

output, err = cmd.CombinedOutput()
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to uninstall consul: %s", string(output))
})

require.NoErrorf(t, err, "failed to install consul: %s", string(output))
}
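
As a usage note, a sketch of how a follow-up tenancy test could drive this runner. The test name and the choice to enable mirroring are hypothetical and not part of this diff; the sketch assumes it sits in the same openshift package, so the package-level suite value and newOpenshiftCluster are in scope.

package openshift

import "testing"

// TestOpenshift_NamespaceMirroring is a hypothetical example showing the intended
// use of the secure and namespaceMirroring flags.
func TestOpenshift_NamespaceMirroring(t *testing.T) {
	cfg := suite.Config()

	// secure=true turns on global.acls.manageSystemACLs, global.tls.enabled and
	// global.tls.enableAutoEncrypt; namespaceMirroring=true additionally enables
	// global.enableConsulNamespaces and global.consulNamespaces.mirroringK8S.
	newOpenshiftCluster(t, cfg, true, true)
}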