Enhance the E2E test to be runnable against remote clusters on e.g. AWS EKS (#1610)
This contains enough changes to the current E2E test code to make it runnable against remote Kubernetes clusters. I was able to make the test pass against my AWS EKS-based test clusters with these changes. You still need to trigger it manually from a local checkout of the ARC repository today, but this might become the foundation for automated E2E tests against the major cloud providers.
This commit is contained in:
parent 9f6f962fc7
commit 473295e3fc
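For reference, a manual run against a remote cluster might look roughly like the sketch below. Every value is an example rather than part of this commit, the GitHub credential and test-repository variables the test already requires are omitted, and the test package path is assumed; only the ARC_E2E_* variables are the knobs this change introduces.

    # Sketch only: adjust paths, registry, and secret name to your setup.
    export ARC_E2E_REMOTE_KUBECONFIG="$HOME/.kube/eks-test-cluster"                    # kubeconfig of the remote (e.g. EKS) cluster
    export ARC_E2E_IMAGE_REPO="123456789012.dkr.ecr.us-east-2.amazonaws.com/arc-e2e"   # prefix for the e2e controller/runner images
    export ARC_E2E_IMAGE_PULL_SECRET_NAME="image-pull-secret"                          # docker-registry secret referenced by the charts
    export ARC_E2E_VERIFY_TIMEOUT="15m"                                                # optional; verification defaults to 8 minutes
    go test -v -timeout 90m ./test/e2e/                                                # assumed location of the E2E test package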
@@ -51,6 +51,9 @@ if [ "${tool}" == "helm" ]; then
     --set image.tag=${VERSION} \
     --set podAnnotations.test-id=${TEST_ID} \
     --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
+    --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
+    --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
+    --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
     -f ${VALUES_FILE}
   set +v
   # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
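The new IMAGE_PULL_SECRET plumbing above only passes a secret name to the charts; the secret itself must already exist in the cluster. A minimal sketch for an ECR-backed cluster, assuming the secret name, registry, and namespace used here (adjust and repeat for whichever namespaces the runner pods use):

    # Sketch: create the docker-registry secret that IMAGE_PULL_SECRET refers to.
    kubectl create secret docker-registry image-pull-secret \
      --docker-server=123456789012.dkr.ecr.us-east-2.amazonaws.com \
      --docker-username=AWS \
      --docker-password="$(aws ecr get-login-password)" \
      --namespace=actions-runner-system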
@@ -1,6 +1,13 @@
 # Set actions-runner-controller settings for testing
 logLevel: "-4"
+imagePullSecrets:
+  - name:
+image:
+  actionsRunnerImagePullSecrets:
+    - name:
 githubWebhookServer:
+  imagePullSecrets:
+    - name:
   logLevel: "-4"
   enabled: true
   labels: {}
@@ -21,52 +21,6 @@ const (
 )
 
 var (
-    controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
-    controllerImageTag  = "e2e"
-    controllerImage     = testing.Img(controllerImageRepo, controllerImageTag)
-    runnerImageRepo     = "actionsrunnercontrollere2e/actions-runner"
-    runnerDindImageRepo = "actionsrunnercontrollere2e/actions-runner-dind"
-    runnerImageTag      = "e2e"
-    runnerImage         = testing.Img(runnerImageRepo, runnerImageTag)
-    runnerDindImage     = testing.Img(runnerDindImageRepo, runnerImageTag)
-
-    prebuildImages = []testing.ContainerImage{
-        controllerImage,
-        runnerImage,
-        runnerDindImage,
-    }
-
-    builds = []testing.DockerBuild{
-        {
-            Dockerfile:   "../../Dockerfile",
-            Args:         []testing.BuildArg{},
-            Image:        controllerImage,
-            EnableBuildX: true,
-        },
-        {
-            Dockerfile: "../../runner/actions-runner.dockerfile",
-            Args: []testing.BuildArg{
-                {
-                    Name:  "RUNNER_VERSION",
-                    Value: "2.294.0",
-                },
-            },
-            Image:        runnerImage,
-            EnableBuildX: true,
-        },
-        {
-            Dockerfile: "../../runner/actions-runner-dind.dockerfile",
-            Args: []testing.BuildArg{
-                {
-                    Name:  "RUNNER_VERSION",
-                    Value: "2.294.0",
-                },
-            },
-            Image:        runnerDindImage,
-            EnableBuildX: true,
-        },
-    }
-
     certManagerVersion = "v1.8.2"
 
     images = []testing.ContainerImage{
@@ -77,11 +31,6 @@ var (
         testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
     }
 
-    commonScriptEnv = []string{
-        "SYNC_PERIOD=" + "30s",
-        "RUNNER_TAG=" + runnerImageTag,
-    }
-
     testResultCMNamePrefix = "test-result-"
 )
 
@@ -128,12 +77,25 @@ func TestE2E(t *testing.T) {
     skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != ""
     skipArgoTunnelCleanUp := os.Getenv("ARC_E2E_SKIP_ARGO_TUNNEL_CLEAN_UP") != ""
 
-    env := initTestEnv(t, k8sMinorVer)
+    vars := buildVars(os.Getenv("ARC_E2E_IMAGE_REPO"))
+
+    env := initTestEnv(t, k8sMinorVer, vars)
+    if vt := os.Getenv("ARC_E2E_VERIFY_TIMEOUT"); vt != "" {
+        var err error
+        env.VerifyTimeout, err = time.ParseDuration(vt)
+        if err != nil {
+            t.Fatalf("Failed to parse duration %q: %v", vt, err)
+        }
+    }
 
     t.Run("build and load images", func(t *testing.T) {
         env.buildAndLoadImages(t)
     })
 
     if t.Failed() {
         return
     }
 
     t.Run("install cert-manager", func(t *testing.T) {
         env.installCertManager(t)
     })
@@ -182,7 +144,7 @@ func TestE2E(t *testing.T) {
     }
 
     t.Run("install edge actions-runner-controller", func(t *testing.T) {
-        env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
+        env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
     })
 
     if t.Failed() {
@@ -242,7 +204,7 @@ func TestE2E(t *testing.T) {
     }
 
     t.Run("install edge actions-runner-controller", func(t *testing.T) {
-        env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
+        env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
     })
 
     if t.Failed() {
@@ -270,6 +232,8 @@ func TestE2E(t *testing.T) {
 type env struct {
     *testing.Env
 
+    Kind *testing.Kind
+
     // Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
     // to let ARC authenticate as a GitHub App
     useApp bool
@@ -284,12 +248,98 @@ type env struct {
     scaleDownDelaySecondsAfterScaleOut int64
     minReplicas                        int64
     dockerdWithinRunnerContainer       bool
+    remoteKubeconfig                   string
+    imagePullSecretName                string
+
+    vars          vars
+    VerifyTimeout time.Duration
 }
 
-func initTestEnv(t *testing.T, k8sMinorVer string) *env {
+type vars struct {
+    controllerImageRepo, controllerImageTag string
+
+    runnerImageRepo     string
+    runnerDindImageRepo string
+
+    prebuildImages []testing.ContainerImage
+    builds         []testing.DockerBuild
+
+    commonScriptEnv []string
+}
+
+func buildVars(repo string) vars {
+    if repo == "" {
+        repo = "actionsrunnercontrollere2e"
+    }
+
+    var (
+        controllerImageRepo = repo + "/actions-runner-controller"
+        controllerImageTag  = "e2e"
+        controllerImage     = testing.Img(controllerImageRepo, controllerImageTag)
+        runnerImageRepo     = repo + "/actions-runner"
+        runnerDindImageRepo = repo + "/actions-runner-dind"
+        runnerImageTag      = "e2e"
+        runnerImage         = testing.Img(runnerImageRepo, runnerImageTag)
+        runnerDindImage     = testing.Img(runnerDindImageRepo, runnerImageTag)
+    )
+
+    var vs vars
+
+    vs.controllerImageRepo, vs.controllerImageTag = controllerImageRepo, controllerImageTag
+    vs.runnerDindImageRepo = runnerDindImageRepo
+    vs.runnerImageRepo = runnerImageRepo
+
+    // vs.controllerImage, vs.controllerImageTag
+
+    vs.prebuildImages = []testing.ContainerImage{
+        controllerImage,
+        runnerImage,
+        runnerDindImage,
+    }
+
+    vs.builds = []testing.DockerBuild{
+        {
+            Dockerfile:   "../../Dockerfile",
+            Args:         []testing.BuildArg{},
+            Image:        controllerImage,
+            EnableBuildX: true,
+        },
+        {
+            Dockerfile: "../../runner/actions-runner.dockerfile",
+            Args: []testing.BuildArg{
+                {
+                    Name:  "RUNNER_VERSION",
+                    Value: "2.294.0",
+                },
+            },
+            Image:        runnerImage,
+            EnableBuildX: true,
+        },
+        {
+            Dockerfile: "../../runner/actions-runner-dind.dockerfile",
+            Args: []testing.BuildArg{
+                {
+                    Name:  "RUNNER_VERSION",
+                    Value: "2.294.0",
+                },
+            },
+            Image:        runnerDindImage,
+            EnableBuildX: true,
+        },
+    }
+
+    vs.commonScriptEnv = []string{
+        "SYNC_PERIOD=" + "30s",
+        "RUNNER_TAG=" + runnerImageTag,
+    }
+
+    return vs
+}
+
+func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
     t.Helper()
 
-    testingEnv := testing.Start(t, k8sMinorVer, testing.Preload(images...))
+    testingEnv := testing.Start(t, k8sMinorVer)
 
     e := &env{Env: testingEnv}
 
@@ -309,6 +359,23 @@ func initTestEnv(t *testing.T, k8sMinorVer string) *env {
     e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
     e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
     e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
+    e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
+    e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
+    e.vars = vars
+
+    if e.remoteKubeconfig == "" {
+        e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
+        e.Env.Kubeconfig = e.Kind.Kubeconfig()
+    } else {
+        e.Env.Kubeconfig = e.remoteKubeconfig
+
+        // Kind automatically installs https://github.com/rancher/local-path-provisioner for PVs.
+        // But assuming the remote cluster isn't a kind Kubernetes cluster,
+        // we need to install any provisioner manually.
+        // Here, we install the local-path-provisioner on the remote cluster too,
+        // so that we won't suffer from E2E failures due to the provisioner difference.
+        e.KubectlApply(t, "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml", testing.KubectlConfig{})
+    }
 
     e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
     e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
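When the remote cluster is EKS, the kubeconfig that ARC_E2E_REMOTE_KUBECONFIG points at can be produced with the AWS CLI; the cluster name, region, and file path below are placeholders:

    # Sketch: write a dedicated kubeconfig for the remote test cluster.
    aws eks update-kubeconfig \
      --name arc-e2e-test-cluster \
      --region us-east-2 \
      --kubeconfig "$HOME/.kube/eks-test-cluster"
    export ARC_E2E_REMOTE_KUBECONFIG="$HOME/.kube/eks-test-cluster"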
@@ -328,8 +395,29 @@ func (e *env) f() {
 func (e *env) buildAndLoadImages(t *testing.T) {
     t.Helper()
 
-    e.DockerBuild(t, builds)
-    e.KindLoadImages(t, prebuildImages)
+    e.DockerBuild(t, e.vars.builds)
+
+    if e.remoteKubeconfig == "" {
+        e.KindLoadImages(t, e.vars.prebuildImages)
+    } else {
+        // If it fails with `no basic auth credentials` here, you might have missed logging into the container registry beforehand.
+        // For ECR, run something like:
+        // aws ecr get-login-password | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com
+        // Also note that the authenticated session can be expired in a day or so(probably depends on your AWS config),
+        // so you might better write a script to do docker login before running the E2E test.
+        e.DockerPush(t, e.vars.prebuildImages)
+    }
 }
 
+func (e *env) KindLoadImages(t *testing.T, prebuildImages []testing.ContainerImage) {
+    t.Helper()
+
+    ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
+    defer cancel()
+
+    if err := e.Kind.LoadImages(ctx, prebuildImages); err != nil {
+        t.Fatal(err)
+    }
+}
+
 func (e *env) installCertManager(t *testing.T) {
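As the comment above notes, the registry login (ECR in particular) expires after a while, so it may help to refresh it in a small wrapper script right before the test run; the account ID, region, and test package path are placeholders:

    # Sketch: refresh the registry login, then run the E2E test.
    AWS_ACCOUNT_ID=123456789012
    AWS_DEFAULT_REGION=us-east-2
    aws ecr get-login-password |
      docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
    go test -v -timeout 90m ./test/e2e/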
@@ -355,7 +443,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
     e.createControllerNamespaceAndServiceAccount(t)
 
     scriptEnv := []string{
-        "KUBECONFIG=" + e.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
         "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
     }
 
@@ -364,6 +452,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
         "TEST_ID=" + testID,
         "NAME=" + repo,
         "VERSION=" + tag,
+        "IMAGE_PULL_SECRET=" + e.imagePullSecretName,
     }
 
     if e.useApp {
@@ -381,7 +470,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
     }
 
     scriptEnv = append(scriptEnv, varEnv...)
-    scriptEnv = append(scriptEnv, commonScriptEnv...)
+    scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
 
     e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
 }
@@ -402,7 +491,7 @@ func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
     e.createControllerNamespaceAndServiceAccount(t)
 
     scriptEnv := []string{
-        "KUBECONFIG=" + e.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
         "OP=" + op,
     }
 
@@ -431,17 +520,17 @@ func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
     if e.dockerdWithinRunnerContainer {
         varEnv = append(varEnv,
             "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
-            "RUNNER_NAME="+runnerDindImageRepo,
+            "RUNNER_NAME="+e.vars.runnerDindImageRepo,
         )
     } else {
         varEnv = append(varEnv,
            "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=false",
-            "RUNNER_NAME="+runnerImageRepo,
+            "RUNNER_NAME="+e.vars.runnerImageRepo,
        )
     }
 
     scriptEnv = append(scriptEnv, varEnv...)
-    scriptEnv = append(scriptEnv, commonScriptEnv...)
+    scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
 
     e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
 }
@@ -458,7 +547,7 @@ func (e *env) doArgoTunnel(t *testing.T, op string) {
     t.Helper()
 
     scriptEnv := []string{
-        "KUBECONFIG=" + e.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
         "OP=" + op,
         "TUNNEL_ID=" + os.Getenv("TUNNEL_ID"),
         "TUNNE_NAME=" + os.Getenv("TUNNEL_NAME"),
@@ -492,7 +581,15 @@ func (e *env) testJobs(testID string) []job {
 func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) {
     t.Helper()
 
-    verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID))
+    verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID), e.verifyTimeout())
 }
 
+func (e *env) verifyTimeout() time.Duration {
+    if e.VerifyTimeout > 0 {
+        return e.VerifyTimeout
+    }
+
+    return 8 * 60 * time.Second
+}
+
 type job struct {
@@ -669,7 +766,7 @@ kubectl create cm %s$id --from-literal=status=ok
     }
 }
 
-func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
+func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, timeout time.Duration) {
     t.Helper()
 
     var expected []string
@@ -687,7 +784,7 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
         testResultCMName := testJobs[i].configMapName
 
         kubectlEnv := []string{
-            "KUBECONFIG=" + env.Kubeconfig(),
+            "KUBECONFIG=" + env.Kubeconfig,
         }
 
         cmCfg := testing.KubectlConfig{
@@ -719,5 +816,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
         }
 
         return results, err
-    }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal(expected))
+    }, timeout, 30*time.Second).Should(gomega.Equal(expected))
 }
@@ -71,3 +71,18 @@ func (k *Docker) dockerBuildCombinedOutput(ctx context.Context, build DockerBuil
 
     return k.CombinedOutput(cmd)
 }
+
+func (k *Docker) Push(ctx context.Context, images []ContainerImage) error {
+    for _, img := range images {
+        _, err := k.CombinedOutput(dockerPushCmd(ctx, img.Repo, img.Tag))
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func dockerPushCmd(ctx context.Context, repo, tag string) *exec.Cmd {
+    return exec.CommandContext(ctx, "docker", "push", repo+":"+tag)
+}
@@ -34,21 +34,17 @@ func Img(repo, tag string) ContainerImage {
 // All of its methods are idempotent so that you can safely call it from within each subtest
 // and you can rerun the individual subtest until it works as you expect.
 type Env struct {
-    kind    *Kind
-    docker  *Docker
-    Kubectl *Kubectl
-    bash    *Bash
+    Kubeconfig string
+    docker     *Docker
+    Kubectl    *Kubectl
+    bash       *Bash
 }
 
-func Start(t *testing.T, k8sMinorVer string, opts ...Option) *Env {
+func Start(t *testing.T, k8sMinorVer string) *Env {
     t.Helper()
 
-    k := StartKind(t, k8sMinorVer, opts...)
-
     var env Env
 
-    env.kind = k
-
     d := &Docker{}
 
     env.docker = d
@@ -65,12 +61,12 @@ func Start(t *testing.T, k8sMinorVer string, opts ...Option) *Env {
 }
 
 func (e *Env) GetOrGenerateTestID(t *testing.T) string {
-    k, kctl := e.kind, e.Kubectl
+    kctl := e.Kubectl
 
     cmKey := "id"
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + k.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cmCfg := KubectlConfig{
@@ -95,10 +91,10 @@ func (e *Env) GetOrGenerateTestID(t *testing.T) string {
 }
 
 func (e *Env) DeleteTestID(t *testing.T) {
-    k, kctl := e.kind, e.Kubectl
+    kctl := e.Kubectl
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + k.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cmCfg := KubectlConfig{
@@ -125,13 +121,13 @@ func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {
     }
 }
 
-func (e *Env) KindLoadImages(t *testing.T, prebuildImages []ContainerImage) {
+func (e *Env) DockerPush(t *testing.T, images []ContainerImage) {
     t.Helper()
 
     ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
     defer cancel()
 
-    if err := e.kind.LoadImages(ctx, prebuildImages); err != nil {
+    if err := e.docker.Push(ctx, images); err != nil {
         t.Fatal(err)
     }
 }
@@ -143,7 +139,7 @@ func (e *Env) KubectlApply(t *testing.T, path string, cfg KubectlConfig) {
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -160,7 +156,7 @@ func (e *Env) KubectlWaitUntilDeployAvailable(t *testing.T, name string, cfg Kub
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -177,7 +173,7 @@ func (e *Env) KubectlEnsureNS(t *testing.T, name string, cfg KubectlConfig) {
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -194,7 +190,7 @@ func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindin
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -206,10 +202,6 @@ func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindin
     }
 }
 
-func (e *Env) Kubeconfig() string {
-    return e.kind.Kubeconfig()
-}
-
 func (e *Env) RunScript(t *testing.T, path string, cfg ScriptConfig) {
     t.Helper()
 