diff --git a/pkg/app/app.go b/pkg/app/app.go index 3d5b896f..03d78c43 100644 --- a/pkg/app/app.go +++ b/pkg/app/app.go @@ -1062,7 +1062,13 @@ func (a *App) visitStatesWithSelectorsAndRemoteSupport(fileOrDir string, converg } } - return a.visitStates(fileOrDir, opts, f) + // apply release overrides to the HelmState before invoking f + fHelmStateWithOverrides := func(st *state.HelmState) (bool, []error) { + st.Releases = st.GetReleasesWithOverrides() + return f(st) + } + + return a.visitStates(fileOrDir, opts, fHelmStateWithOverrides) } func processFilteredReleases(st *state.HelmState, helm helmexec.Interface, converge func(st *state.HelmState) []error, includeTransitiveNeeds bool) (bool, []error) { @@ -1073,7 +1079,7 @@ func processFilteredReleases(st *state.HelmState, helm helmexec.Interface, conve } } - if err := checkDuplicates(helm, st, st.GetReleasesWithOverrides()); err != nil { + if err := checkDuplicates(helm, st, st.Releases); err != nil { return false, []error{err} } @@ -1201,7 +1207,7 @@ func (a *App) findDesiredStateFiles(specifiedPath string, opts LoadOpts) ([]stri } func (a *App) getSelectedReleases(r *Run, includeTransitiveNeeds bool) ([]state.ReleaseSpec, []state.ReleaseSpec, error) { - selected, err := r.state.GetSelectedReleasesWithOverrides(includeTransitiveNeeds) + selected, err := r.state.GetSelectedReleases(includeTransitiveNeeds) if err != nil { return nil, nil, err } @@ -1219,7 +1225,7 @@ func (a *App) getSelectedReleases(r *Run, includeTransitiveNeeds bool) ([]state. } } - allReleases := r.state.GetReleasesWithOverrides() + allReleases := r.state.Releases groupsByID := map[string][]*state.ReleaseSpec{} for _, r := range allReleases { @@ -1502,8 +1508,6 @@ func (a *App) delete(r *Run, purge bool, c DestroyConfigProvider) (bool, []error names[i] = fmt.Sprintf(" %s (%s)", r.Name, r.Chart) } - st.Releases = st.GetReleasesWithOverrides() - var errs []error msg := fmt.Sprintf(`Affected releases are: @@ -1608,7 +1612,7 @@ func (a *App) status(r *Run, c StatusesConfigProvider) (bool, []error) { st := r.state helm := r.helm - allReleases := st.GetReleasesWithOverrides() + allReleases := st.Releases selectedReleases, selectedAndNeededReleases, err := a.getSelectedReleases(r, false) if err != nil { diff --git a/pkg/app/app_list_test.go b/pkg/app/app_list_test.go index 528810ee..79602aab 100644 --- a/pkg/app/app_list_test.go +++ b/pkg/app/app_list_test.go @@ -310,7 +310,7 @@ releases: assert.Nil(t, err) }) - expected := `[{"name":"myrelease1","namespace":"","enabled":true,"installed":false,"labels":"id:myrelease1","chart":"mychart1","version":""},{"name":"myrelease2","namespace":"","enabled":false,"installed":true,"labels":"","chart":"mychart1","version":""},{"name":"myrelease3","namespace":"","enabled":true,"installed":true,"labels":"","chart":"mychart1","version":""},{"name":"myrelease4","namespace":"","enabled":true,"installed":true,"labels":"id:myrelease1","chart":"mychart1","version":""}] + expected := `[{"name":"myrelease1","namespace":"testNamespace","enabled":true,"installed":false,"labels":"id:myrelease1","chart":"mychart1","version":""},{"name":"myrelease2","namespace":"testNamespace","enabled":false,"installed":true,"labels":"","chart":"mychart1","version":""},{"name":"myrelease3","namespace":"testNamespace","enabled":true,"installed":true,"labels":"","chart":"mychart1","version":""},{"name":"myrelease4","namespace":"testNamespace","enabled":true,"installed":true,"labels":"id:myrelease1","chart":"mychart1","version":""}] ` assert.Equal(t, expected, out) } diff --git a/pkg/app/app_test.go
b/pkg/app/app_test.go index cf5bb536..317902ea 100644 --- a/pkg/app/app_test.go +++ b/pkg/app/app_test.go @@ -4363,11 +4363,11 @@ releases: assert.Nil(t, err) }) - expected := `NAME NAMESPACE ENABLED INSTALLED LABELS CHART VERSION -myrelease1 true false common:label,id:myrelease1 mychart1 -myrelease2 false true common:label mychart1 -myrelease3 true true mychart1 -myrelease4 true true id:myrelease1 mychart1 + expected := `NAME NAMESPACE ENABLED INSTALLED LABELS CHART VERSION +myrelease1 testNamespace true false common:label,id:myrelease1 mychart1 +myrelease2 testNamespace false true common:label mychart1 +myrelease3 testNamespace true true mychart1 +myrelease4 testNamespace true true id:myrelease1 mychart1 ` assert.Equal(t, expected, out) diff --git a/pkg/app/run.go b/pkg/app/run.go index 90c88a16..8e349b16 100644 --- a/pkg/app/run.go +++ b/pkg/app/run.go @@ -78,7 +78,6 @@ func (r *Run) withPreparedCharts(helmfileCommand string, opts state.ChartPrepare for i := range r.state.Releases { rel := &r.state.Releases[i] - key := state.PrepareChartKey{ Name: rel.Name, Namespace: rel.Namespace, diff --git a/pkg/state/selector_test.go b/pkg/state/selector_test.go index 82efbee7..2f749ded 100644 --- a/pkg/state/selector_test.go +++ b/pkg/state/selector_test.go @@ -110,7 +110,9 @@ func TestSelectReleasesWithOverrides(t *testing.T) { for _, tc := range testcases { state.Selectors = tc.selector - rs, err := state.GetSelectedReleasesWithOverrides(false) + state.Releases = state.GetReleasesWithOverrides() + + rs, err := state.GetSelectedReleases(false) if err != nil { t.Fatalf("%s %s: %v", tc.selector, tc.subject, err) } @@ -178,8 +180,9 @@ func TestSelectReleasesWithOverridesWithIncludedTransitives(t *testing.T) { for _, tc := range testcases { state.Selectors = tc.selector + state.Releases = state.GetReleasesWithOverrides() - rs, err := state.GetSelectedReleasesWithOverrides(tc.includeTransitiveNeeds) + rs, err := state.GetSelectedReleases(tc.includeTransitiveNeeds) if err != nil { t.Fatalf("%s %s: %v", tc.selector, tc.subject, err) } diff --git a/pkg/state/state.go b/pkg/state/state.go index fcfc63d5..4b991692 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -1051,7 +1051,7 @@ func (st *HelmState) PrepareCharts(helm helmexec.Interface, dir string, concurre // This and releasesNeedCharts ensures that we run operations like helm-dep-build and prepare-hook calls only on // releases that are (1) selected by the selectors and (2) to be installed. 
- selected, err = st.GetSelectedReleasesWithOverrides(opts.IncludeTransitiveNeeds) + selected, err = st.GetSelectedReleases(opts.IncludeTransitiveNeeds) if err != nil { return nil, []error{err} } @@ -2079,9 +2079,9 @@ func (st *HelmState) GetReleasesWithOverrides() []ReleaseSpec { return rs } -func (st *HelmState) SelectReleasesWithOverrides(includeTransitiveNeeds bool) ([]Release, error) { +func (st *HelmState) SelectReleases(includeTransitiveNeeds bool) ([]Release, error) { values := st.Values() - rs, err := markExcludedReleases(st.GetReleasesWithOverrides(), st.Selectors, st.CommonLabels, values, includeTransitiveNeeds) + rs, err := markExcludedReleases(st.Releases, st.Selectors, st.CommonLabels, values, includeTransitiveNeeds) if err != nil { return nil, err } @@ -2200,8 +2200,8 @@ func collectNeedsWithTransitives(release ReleaseSpec, allReleases []ReleaseSpec, } } -func (st *HelmState) GetSelectedReleasesWithOverrides(includeTransitiveNeeds bool) ([]ReleaseSpec, error) { - filteredReleases, err := st.SelectReleasesWithOverrides(includeTransitiveNeeds) +func (st *HelmState) GetSelectedReleases(includeTransitiveNeeds bool) ([]ReleaseSpec, error) { + filteredReleases, err := st.SelectReleases(includeTransitiveNeeds) if err != nil { return nil, err } @@ -2217,7 +2217,7 @@ func (st *HelmState) GetSelectedReleasesWithOverrides(includeTransitiveNeeds boo // FilterReleases allows for the execution of helm commands against a subset of the releases in the helmfile. func (st *HelmState) FilterReleases(includeTransitiveNeeds bool) error { - releases, err := st.GetSelectedReleasesWithOverrides(includeTransitiveNeeds) + releases, err := st.GetSelectedReleases(includeTransitiveNeeds) if err != nil { return err } @@ -2305,7 +2305,7 @@ func (st *HelmState) UpdateDeps(helm helmexec.Interface, includeTransitiveNeeds // This and releasesNeedCharts ensures that we run operations like helm-dep-build and prepare-hook calls only on // releases that are (1) selected by the selectors and (2) to be installed. 
- selected, err = st.GetSelectedReleasesWithOverrides(includeTransitiveNeeds) + selected, err = st.GetSelectedReleases(includeTransitiveNeeds) if err != nil { return []error{err} } diff --git a/pkg/state/state_run.go b/pkg/state/state_run.go index 3a89197f..814d4fd0 100644 --- a/pkg/state/state_run.go +++ b/pkg/state/state_run.go @@ -109,7 +109,7 @@ type PlanOptions struct { } func (st *HelmState) PlanReleases(opts PlanOptions) ([][]Release, error) { - marked, err := st.SelectReleasesWithOverrides(opts.IncludeTransitiveNeeds) + marked, err := st.SelectReleases(opts.IncludeTransitiveNeeds) if err != nil { return nil, err } diff --git a/pkg/state/state_test.go b/pkg/state/state_test.go index 0765e9dd..311c8594 100644 --- a/pkg/state/state_test.go +++ b/pkg/state/state_test.go @@ -109,14 +109,6 @@ func TestHelmState_applyDefaultsTo(t *testing.T) { }, want: specWithNamespace, }, - { - name: "Has a namespace from flags", - fields: fieldsWithoutNamespace, - args: args{ - spec: specWithNamespace, - }, - want: specWithNamespace, - }, { name: "Has a namespace from flags and from spec", fields: fieldsWithNamespace, args: args{ @@ -125,6 +117,22 @@ func TestHelmState_applyDefaultsTo(t *testing.T) { }, want: specWithNamespaceFromFields, }, + { + name: "Spec and flags have no namespace", + fields: fieldsWithoutNamespace, + args: args{ + spec: specWithoutNamespace, + }, + want: specWithoutNamespace, + }, + { + name: "Spec has no namespace but flags do", + fields: fieldsWithNamespace, + args: args{ + spec: specWithoutNamespace, + }, + want: specWithNamespaceFromFields, + }, } for i := range tests { tt := tests[i] diff --git a/test/e2e/template/helmfile/snapshot_test.go b/test/e2e/template/helmfile/snapshot_test.go index 571cf853..1727a65d 100644 --- a/test/e2e/template/helmfile/snapshot_test.go +++ b/test/e2e/template/helmfile/snapshot_test.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strings" "testing" @@ -17,6 +18,11 @@ import ( "gopkg.in/yaml.v3" ) +var ( + // e.g. https_github_com_cloudposse_helmfiles_git.ref=0.xx.0 + chartGitFullPathRegex = regexp.MustCompile(`chart=.*git\.ref=.*/charts/.*`) +) + type ociChart struct { name string version string @@ -172,6 +178,10 @@ func TestHelmfileTemplateWithBuildCommand(t *testing.T) { gotStr := string(got) gotStr = strings.ReplaceAll(gotStr, fmt.Sprintf("chart=%s", wd), "chart=$WD") + + // Replace go-getter path with $GoGetterPath + gotStr = chartGitFullPathRegex.ReplaceAllString(gotStr, `chart=$$GoGetterPath`) + // OCI based helm charts are pulled and exported under temporary directory. // We are not sure the exact name of the temporary directory generated by helmfile, // so redact its base directory name with $TMP.
diff --git a/test/e2e/template/helmfile/testdata/snapshot/issue_2098_release_template_needs/output.yaml b/test/e2e/template/helmfile/testdata/snapshot/issue_2098_release_template_needs/output.yaml index 32490372..eac7d29e 100644 --- a/test/e2e/template/helmfile/testdata/snapshot/issue_2098_release_template_needs/output.yaml +++ b/test/e2e/template/helmfile/testdata/snapshot/issue_2098_release_template_needs/output.yaml @@ -18,7 +18,7 @@ releases: - chart: aservo/util version: 0.0.1 needs: - - default-shared-resources + - default/default-shared-resources name: default-release-resources namespace: default labels: diff --git a/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/config.yaml b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/config.yaml new file mode 100644 index 00000000..69e56e2e --- /dev/null +++ b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/config.yaml @@ -0,0 +1,5 @@ +chartifyTempDir: template_go_getter_with_selector_temp +helmfileArgs: +- template +- -l +- name=acme-jx diff --git a/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/input.yaml b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/input.yaml new file mode 100644 index 00000000..75323838 --- /dev/null +++ b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/input.yaml @@ -0,0 +1,17 @@ +filepath: "" +namespace: istio-system +repositories: +- name: istio + url: https://istio-release.storage.googleapis.com/charts +releases: +- chart: git::https://github.com/joshuasimon-taulia/acme.git@charts/acme?ref=master + name: acme-jx + labels: + values.jenkins-x.io: lock + version.jenkins-x.io: lock + skipDeps: true +- chart: istio/base + version: 1.12.2 + name: istio-base +templates: {} +renderedvalues: {} \ No newline at end of file diff --git a/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/output.yaml b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/output.yaml new file mode 100644 index 00000000..c5792331 --- /dev/null +++ b/test/e2e/template/helmfile/testdata/snapshot/issue_498_template_go_getter_with_selector/output.yaml @@ -0,0 +1,6 @@ +Adding repo istio https://istio-release.storage.googleapis.com/charts +"istio" has been added to your repositories + +Templating release=acme-jx, chart=$GoGetterPath + + diff --git a/test/integration/chart-needs-golden/diff b/test/integration/chart-needs-golden/diff index af993944..fcf787ac 100644 --- a/test/integration/chart-needs-golden/diff +++ b/test/integration/chart-needs-golden/diff @@ -3,6 +3,7 @@ Release was not present in Helm. Diff will show entire contents as new. ******************** +helmfile-tests, managed-csi, StorageClass (storage.k8s.io) has been added: - + # Source: azuredisk-csi-storageclass/templates/azuredisk-csi-storageclass.yaml + # Source: azuredisk-csi-storageclass/templates/azuredisk-csi-storageclass.yaml @@ -10,6 +11,7 @@ + kind: StorageClass + metadata: + name: managed-csi ++ namespace: helmfile-tests + provisioner: disk.csi.azure.com + parameters: + skuName: Premium_LRS @@ -23,6 +25,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi Release was not present in Helm. Diff will show entire contents as new. 
******************** +helmfile-tests, azuredisk-csi-attacher-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRoleBinding @@ -38,10 +41,12 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-provisioner-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRoleBinding @@ -57,10 +62,12 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-resizer-role, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRoleBinding @@ -76,10 +83,12 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: azuredisk-external-resizer-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-snapshotter-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRoleBinding @@ -95,10 +104,12 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-external-attacher-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRole @@ -130,6 +141,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, azuredisk-external-provisioner-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRole @@ -170,6 +182,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, azuredisk-external-resizer-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRole @@ -201,6 +214,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +helmfile-tests, azuredisk-external-snapshotter-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: 
azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRole @@ -232,12 +246,14 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, csi-azuredisk-controller, Deployment (apps) has been added: - + # Source: azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml + kind: Deployment + apiVersion: apps/v1 + metadata: + name: csi-azuredisk-controller ++ namespace: helmfile-tests + labels: + app.kubernetes.io/instance: "azuredisk-csi-driver" + app.kubernetes.io/managed-by: "Helm" @@ -283,6 +299,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - "--v=2" + - "--timeout=15s" + - "--leader-election" ++ - "--leader-election-namespace=helmfile-tests" + - "--worker-threads=50" + - "--extra-create-metadata=true" + - "--strict-topology=true" @@ -305,6 +322,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - "-csi-address=$(ADDRESS)" + - "-timeout=1200s" + - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" + - "-worker-threads=500" + env: + - name: ADDRESS @@ -323,6 +341,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + args: + - "-csi-address=$(ADDRESS)" + - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" + - "-v=2" + env: + - name: ADDRESS @@ -342,6 +361,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" + - '-handle-volume-inuse-error=false' + - '-feature-gates=RecoverVolumeExpansionFailure=true' + - "-timeout=240s" @@ -433,18 +453,21 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate +helmfile-tests, csi-azuredisk-controller-sa, ServiceAccount (v1) has been added: - + # Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + labels: + app.kubernetes.io/instance: "azuredisk-csi-driver" + app.kubernetes.io/managed-by: "Helm" + app.kubernetes.io/name: "azuredisk-csi-driver" + app.kubernetes.io/version: "v1.23.0" + helm.sh/chart: "azuredisk-csi-driver-v1.23.0" +helmfile-tests, csi-azuredisk-controller-secret-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRoleBinding @@ -454,10 +477,12 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: csi-azuredisk-controller-secret-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, csi-azuredisk-controller-secret-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml + kind: ClusterRole @@ -468,12 +493,14 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] +helmfile-tests, csi-azuredisk-node, DaemonSet (apps) has been added: - + # Source: 
azuredisk-csi-driver/templates/csi-azuredisk-node.yaml + kind: DaemonSet + apiVersion: apps/v1 + metadata: + name: csi-azuredisk-node ++ namespace: helmfile-tests + labels: + app.kubernetes.io/instance: "azuredisk-csi-driver" + app.kubernetes.io/managed-by: "Helm" @@ -659,6 +686,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + path: /sys/class/ + type: Directory + name: sys-class +helmfile-tests, csi-azuredisk-node-role, ClusterRole (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml + kind: ClusterRole @@ -672,18 +700,21 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +helmfile-tests, csi-azuredisk-node-sa, ServiceAccount (v1) has been added: - + # Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: csi-azuredisk-node-sa ++ namespace: helmfile-tests + labels: + app.kubernetes.io/instance: "azuredisk-csi-driver" + app.kubernetes.io/managed-by: "Helm" + app.kubernetes.io/name: "azuredisk-csi-driver" + app.kubernetes.io/version: "v1.23.0" + helm.sh/chart: "azuredisk-csi-driver-v1.23.0" +helmfile-tests, csi-azuredisk-node-secret-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml + kind: ClusterRoleBinding @@ -693,16 +724,19 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + subjects: + - kind: ServiceAccount + name: csi-azuredisk-node-sa ++ namespace: helmfile-tests + roleRef: + kind: ClusterRole + name: csi-azuredisk-node-role + apiGroup: rbac.authorization.k8s.io +helmfile-tests, csi-azuredisk-node-win, DaemonSet (apps) has been added: - + # Source: azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml + kind: DaemonSet + apiVersion: apps/v1 + metadata: + name: csi-azuredisk-node-win ++ namespace: helmfile-tests + labels: + app.kubernetes.io/instance: "azuredisk-csi-driver" + app.kubernetes.io/managed-by: "Helm" @@ -911,6 +945,7 @@ Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi + hostPath: + path: C:\k + type: DirectoryOrCreate +helmfile-tests, disk.csi.azure.com, CSIDriver (storage.k8s.io) has been added: - + # Source: azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml + apiVersion: storage.k8s.io/v1 diff --git a/test/integration/chart-needs-golden/diff-live b/test/integration/chart-needs-golden/diff-live new file mode 100644 index 00000000..edce0ab6 --- /dev/null +++ b/test/integration/chart-needs-golden/diff-live @@ -0,0 +1,961 @@ +"azuredisk-csi-driver" has been added to your repositories +******************** + + Release was not present in Helm. Diff will show entire contents as new. + +******************** +******************** + + Release was not present in Helm. Diff will show entire contents as new. 
+ +******************** +helmfile-tests, managed-csi, StorageClass (storage.k8s.io) has been added: +- ++ # Source: azuredisk-csi-storageclass/templates/azuredisk-csi-storageclass.yaml ++ # Source: azuredisk-csi-storageclass/templates/azuredisk-csi-storageclass.yaml ++ apiVersion: storage.k8s.io/v1 ++ kind: StorageClass ++ metadata: ++ name: managed-csi ++ namespace: helmfile-tests ++ provisioner: disk.csi.azure.com ++ parameters: ++ skuName: Premium_LRS ++ reclaimPolicy: Retain ++ volumeBindingMode: Immediate ++ allowVolumeExpansion: true +helmfile-tests, azuredisk-csi-attacher-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-csi-attacher-binding ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: azuredisk-external-attacher-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-provisioner-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-csi-provisioner-binding ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: azuredisk-external-provisioner-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-resizer-role, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-csi-resizer-role ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: azuredisk-external-resizer-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-csi-snapshotter-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-csi-snapshotter-binding ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-controller-sa ++ namespace: 
helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: azuredisk-external-snapshotter-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, azuredisk-external-attacher-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-external-attacher-role ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ rules: ++ - apiGroups: [""] ++ resources: ["persistentvolumes"] ++ verbs: ["get", "list", "watch", "update"] ++ - apiGroups: [""] ++ resources: ["nodes"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: ["csi.storage.k8s.io"] ++ resources: ["csinodeinfos"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: ["storage.k8s.io"] ++ resources: ["volumeattachments"] ++ verbs: ["get", "list", "watch", "update", "patch"] ++ - apiGroups: ["storage.k8s.io"] ++ resources: ["volumeattachments/status"] ++ verbs: ["get", "list", "watch", "update", "patch"] ++ - apiGroups: ["coordination.k8s.io"] ++ resources: ["leases"] ++ verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, azuredisk-external-provisioner-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-external-provisioner-role ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ rules: ++ - apiGroups: [""] ++ resources: ["persistentvolumes"] ++ verbs: ["get", "list", "watch", "create", "delete"] ++ - apiGroups: [""] ++ resources: ["persistentvolumeclaims"] ++ verbs: ["get", "list", "watch", "update"] ++ - apiGroups: ["storage.k8s.io"] ++ resources: ["storageclasses"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: [""] ++ resources: ["events"] ++ verbs: ["get", "list", "watch", "create", "update", "patch"] ++ - apiGroups: ["storage.k8s.io"] ++ resources: ["csinodes"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: [""] ++ resources: ["nodes"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: ["snapshot.storage.k8s.io"] ++ resources: ["volumesnapshots"] ++ verbs: ["get", "list"] ++ - apiGroups: ["snapshot.storage.k8s.io"] ++ resources: ["volumesnapshotcontents"] ++ verbs: ["get", "list"] ++ - apiGroups: ["coordination.k8s.io"] ++ resources: ["leases"] ++ verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, azuredisk-external-resizer-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-external-resizer-role ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ rules: ++ - apiGroups: [""] ++ resources: ["persistentvolumes"] ++ verbs: ["get", "list", 
"watch", "update", "patch"] ++ - apiGroups: [""] ++ resources: ["persistentvolumeclaims"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: [""] ++ resources: ["persistentvolumeclaims/status"] ++ verbs: ["update", "patch"] ++ - apiGroups: [""] ++ resources: ["events"] ++ verbs: ["list", "watch", "create", "update", "patch"] ++ - apiGroups: ["coordination.k8s.io"] ++ resources: ["leases"] ++ verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] ++ - apiGroups: [""] ++ resources: ["pods"] ++ verbs: ["get", "list", "watch"] +helmfile-tests, azuredisk-external-snapshotter-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: azuredisk-external-snapshotter-role ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ rules: ++ - apiGroups: [""] ++ resources: ["events"] ++ verbs: ["list", "watch", "create", "update", "patch"] ++ - apiGroups: [""] ++ resources: ["secrets"] ++ verbs: ["get", "list"] ++ - apiGroups: ["snapshot.storage.k8s.io"] ++ resources: ["volumesnapshotclasses"] ++ verbs: ["get", "list", "watch"] ++ - apiGroups: ["snapshot.storage.k8s.io"] ++ resources: ["volumesnapshotcontents"] ++ verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] ++ - apiGroups: ["snapshot.storage.k8s.io"] ++ resources: ["volumesnapshotcontents/status"] ++ verbs: ["update", "patch"] ++ - apiGroups: ["coordination.k8s.io"] ++ resources: ["leases"] ++ verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] +helmfile-tests, csi-azuredisk-controller, Deployment (apps) has been added: +- ++ # Source: azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml ++ kind: Deployment ++ apiVersion: apps/v1 ++ metadata: ++ name: csi-azuredisk-controller ++ namespace: helmfile-tests ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ spec: ++ replicas: 2 ++ selector: ++ matchLabels: ++ app: csi-azuredisk-controller ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ app: csi-azuredisk-controller ++ spec: ++ hostNetwork: true ++ serviceAccountName: csi-azuredisk-controller-sa ++ nodeSelector: ++ kubernetes.io/os: linux ++ priorityClassName: system-cluster-critical ++ tolerations: ++ - effect: NoSchedule ++ key: node-role.kubernetes.io/master ++ operator: Exists ++ - effect: NoSchedule ++ key: node-role.kubernetes.io/controlplane ++ operator: Exists ++ - effect: NoSchedule ++ key: node-role.kubernetes.io/control-plane ++ operator: Exists ++ containers: ++ - name: csi-provisioner ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.2.0" ++ args: ++ - "--feature-gates=Topology=true" ++ - "--csi-address=$(ADDRESS)" ++ - "--v=2" ++ - "--timeout=15s" ++ - "--leader-election" ++ - "--leader-election-namespace=helmfile-tests" ++ - "--worker-threads=50" ++ - "--extra-create-metadata=true" ++ - 
"--strict-topology=true" ++ env: ++ - name: ADDRESS ++ value: /csi/csi.sock ++ volumeMounts: ++ - mountPath: /csi ++ name: socket-dir ++ resources: ++ limits: ++ memory: 500Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: csi-attacher ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v3.5.0" ++ args: ++ - "-v=2" ++ - "-csi-address=$(ADDRESS)" ++ - "-timeout=1200s" ++ - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" ++ - "-worker-threads=500" ++ env: ++ - name: ADDRESS ++ value: /csi/csi.sock ++ volumeMounts: ++ - mountPath: /csi ++ name: socket-dir ++ resources: ++ limits: ++ memory: 500Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: csi-snapshotter ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v5.0.1" ++ args: ++ - "-csi-address=$(ADDRESS)" ++ - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" ++ - "-v=2" ++ env: ++ - name: ADDRESS ++ value: /csi/csi.sock ++ volumeMounts: ++ - name: socket-dir ++ mountPath: /csi ++ resources: ++ limits: ++ memory: 100Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: csi-resizer ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.5.0" ++ args: ++ - "-csi-address=$(ADDRESS)" ++ - "-v=2" ++ - "-leader-election" ++ - "--leader-election-namespace=helmfile-tests" ++ - '-handle-volume-inuse-error=false' ++ - '-feature-gates=RecoverVolumeExpansionFailure=true' ++ - "-timeout=240s" ++ env: ++ - name: ADDRESS ++ value: /csi/csi.sock ++ volumeMounts: ++ - name: socket-dir ++ mountPath: /csi ++ resources: ++ limits: ++ memory: 500Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: liveness-probe ++ image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.7.0" ++ args: ++ - --csi-address=/csi/csi.sock ++ - --probe-timeout=3s ++ - --health-port=29602 ++ - --v=2 ++ volumeMounts: ++ - name: socket-dir ++ mountPath: /csi ++ resources: ++ limits: ++ memory: 100Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: azuredisk ++ image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.23.0" ++ args: ++ - "--v=5" ++ - "--endpoint=$(CSI_ENDPOINT)" ++ - "--metrics-address=0.0.0.0:29604" ++ - "--disable-avset-nodes=false" ++ - "--vm-type=" ++ - "--drivername=disk.csi.azure.com" ++ - "--cloud-config-secret-name=azure-cloud-provider" ++ - "--cloud-config-secret-namespace=kube-system" ++ - "--custom-user-agent=" ++ - "--user-agent-suffix=OSS-helm" ++ - "--allow-empty-cloud-config=false" ++ - "--vmss-cache-ttl-seconds=-1" ++ ports: ++ - containerPort: 29602 ++ name: healthz ++ protocol: TCP ++ - containerPort: 29604 ++ name: metrics ++ protocol: TCP ++ livenessProbe: ++ failureThreshold: 5 ++ httpGet: ++ path: /healthz ++ port: healthz ++ initialDelaySeconds: 30 ++ timeoutSeconds: 10 ++ periodSeconds: 30 ++ env: ++ - name: AZURE_CREDENTIAL_FILE ++ valueFrom: ++ configMapKeyRef: ++ name: azure-cred-file ++ key: path ++ optional: true ++ - name: CSI_ENDPOINT ++ value: unix:///csi/csi.sock ++ - name: AZURE_GO_SDK_LOG_LEVEL ++ value: ++ imagePullPolicy: IfNotPresent ++ volumeMounts: ++ - mountPath: /csi ++ name: socket-dir ++ - mountPath: /etc/kubernetes/ ++ name: azure-cred ++ resources: ++ limits: ++ memory: 500Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ volumes: ++ - name: socket-dir ++ emptyDir: {} ++ - name: azure-cred ++ hostPath: ++ path: /etc/kubernetes/ ++ type: DirectoryOrCreate +helmfile-tests, csi-azuredisk-controller-sa, ServiceAccount (v1) has been added: +- ++ # Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml ++ 
apiVersion: v1 ++ kind: ServiceAccount ++ metadata: ++ name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" +helmfile-tests, csi-azuredisk-controller-secret-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: csi-azuredisk-controller-secret-binding ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-controller-sa ++ namespace: helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: csi-azuredisk-controller-secret-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, csi-azuredisk-controller-secret-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: csi-azuredisk-controller-secret-role ++ rules: ++ - apiGroups: [""] ++ resources: ["secrets"] ++ verbs: ["get"] +helmfile-tests, csi-azuredisk-node, DaemonSet (apps) has been added: +- ++ # Source: azuredisk-csi-driver/templates/csi-azuredisk-node.yaml ++ kind: DaemonSet ++ apiVersion: apps/v1 ++ metadata: ++ name: csi-azuredisk-node ++ namespace: helmfile-tests ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ spec: ++ updateStrategy: ++ rollingUpdate: ++ maxUnavailable: 1 ++ type: RollingUpdate ++ selector: ++ matchLabels: ++ app: csi-azuredisk-node ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ app: csi-azuredisk-node ++ spec: ++ hostNetwork: true ++ dnsPolicy: Default ++ serviceAccountName: csi-azuredisk-node-sa ++ nodeSelector: ++ kubernetes.io/os: linux ++ affinity: ++ nodeAffinity: ++ requiredDuringSchedulingIgnoredDuringExecution: ++ nodeSelectorTerms: ++ - matchExpressions: ++ - key: type ++ operator: NotIn ++ values: ++ - virtual-kubelet ++ priorityClassName: system-node-critical ++ tolerations: ++ - operator: Exists ++ containers: ++ - name: liveness-probe ++ volumeMounts: ++ - mountPath: /csi ++ name: socket-dir ++ image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.7.0" ++ args: ++ - --csi-address=/csi/csi.sock ++ - --probe-timeout=3s ++ - --health-port=29603 ++ - --v=2 ++ resources: ++ limits: ++ memory: 100Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: node-driver-registrar ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.5.1" ++ args: ++ - --csi-address=$(ADDRESS) ++ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) ++ - --v=2 ++ livenessProbe: ++ exec: ++ command: ++ - /csi-node-driver-registrar ++ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) ++ - --mode=kubelet-registration-probe ++ initialDelaySeconds: 30 ++ timeoutSeconds: 15 ++ env: ++ - name: ADDRESS ++ value: /csi/csi.sock ++ - name: DRIVER_REG_SOCK_PATH ++ value: 
/var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock ++ volumeMounts: ++ - name: socket-dir ++ mountPath: /csi ++ - name: registration-dir ++ mountPath: /registration ++ resources: ++ limits: ++ memory: 100Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ - name: azuredisk ++ image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.23.0" ++ args: ++ - "--v=5" ++ - "--endpoint=$(CSI_ENDPOINT)" ++ - "--nodeid=$(KUBE_NODE_NAME)" ++ - "--metrics-address=0.0.0.0:29605" ++ - "--enable-perf-optimization=true" ++ - "--drivername=disk.csi.azure.com" ++ - "--volume-attach-limit=-1" ++ - "--cloud-config-secret-name=azure-cloud-provider" ++ - "--cloud-config-secret-namespace=kube-system" ++ - "--custom-user-agent=" ++ - "--user-agent-suffix=OSS-helm" ++ - "--allow-empty-cloud-config=true" ++ - "--support-zone=true" ++ - "--get-node-info-from-labels=false" ++ ports: ++ - containerPort: 29603 ++ name: healthz ++ protocol: TCP ++ livenessProbe: ++ failureThreshold: 5 ++ httpGet: ++ path: /healthz ++ port: healthz ++ initialDelaySeconds: 30 ++ timeoutSeconds: 10 ++ periodSeconds: 30 ++ env: ++ - name: AZURE_CREDENTIAL_FILE ++ valueFrom: ++ configMapKeyRef: ++ name: azure-cred-file ++ key: path ++ optional: true ++ - name: CSI_ENDPOINT ++ value: unix:///csi/csi.sock ++ - name: KUBE_NODE_NAME ++ valueFrom: ++ fieldRef: ++ apiVersion: v1 ++ fieldPath: spec.nodeName ++ - name: AZURE_GO_SDK_LOG_LEVEL ++ value: ++ imagePullPolicy: IfNotPresent ++ securityContext: ++ privileged: true ++ volumeMounts: ++ - mountPath: /csi ++ name: socket-dir ++ - mountPath: /var/lib/kubelet/ ++ mountPropagation: Bidirectional ++ name: mountpoint-dir ++ - mountPath: /etc/kubernetes/ ++ name: azure-cred ++ - mountPath: /dev ++ name: device-dir ++ - mountPath: /sys/bus/scsi/devices ++ name: sys-devices-dir ++ - mountPath: /sys/class/ ++ name: sys-class ++ resources: ++ limits: ++ memory: 200Mi ++ requests: ++ cpu: 10m ++ memory: 20Mi ++ volumes: ++ - hostPath: ++ path: /var/lib/kubelet/plugins/disk.csi.azure.com ++ type: DirectoryOrCreate ++ name: socket-dir ++ - hostPath: ++ path: /var/lib/kubelet/ ++ type: DirectoryOrCreate ++ name: mountpoint-dir ++ - hostPath: ++ path: /var/lib/kubelet/plugins_registry/ ++ type: DirectoryOrCreate ++ name: registration-dir ++ - hostPath: ++ path: /etc/kubernetes/ ++ type: DirectoryOrCreate ++ name: azure-cred ++ - hostPath: ++ path: /dev ++ type: Directory ++ name: device-dir ++ - hostPath: ++ path: /sys/bus/scsi/devices ++ type: Directory ++ name: sys-devices-dir ++ - hostPath: ++ path: /sys/class/ ++ type: Directory ++ name: sys-class +helmfile-tests, csi-azuredisk-node-role, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml ++ kind: ClusterRole ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: csi-azuredisk-node-role ++ rules: ++ - apiGroups: [""] ++ resources: ["secrets"] ++ verbs: ["get"] ++ - apiGroups: [""] ++ resources: ["nodes"] ++ verbs: ["get"] +helmfile-tests, csi-azuredisk-node-sa, ServiceAccount (v1) has been added: +- ++ # Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml ++ apiVersion: v1 ++ kind: ServiceAccount ++ metadata: ++ name: csi-azuredisk-node-sa ++ namespace: helmfile-tests ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" +helmfile-tests, 
csi-azuredisk-node-secret-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml ++ kind: ClusterRoleBinding ++ apiVersion: rbac.authorization.k8s.io/v1 ++ metadata: ++ name: csi-azuredisk-node-secret-binding ++ subjects: ++ - kind: ServiceAccount ++ name: csi-azuredisk-node-sa ++ namespace: helmfile-tests ++ roleRef: ++ kind: ClusterRole ++ name: csi-azuredisk-node-role ++ apiGroup: rbac.authorization.k8s.io +helmfile-tests, csi-azuredisk-node-win, DaemonSet (apps) has been added: +- ++ # Source: azuredisk-csi-driver/templates/csi-azuredisk-node-windows.yaml ++ kind: DaemonSet ++ apiVersion: apps/v1 ++ metadata: ++ name: csi-azuredisk-node-win ++ namespace: helmfile-tests ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ spec: ++ updateStrategy: ++ rollingUpdate: ++ maxUnavailable: 1 ++ type: RollingUpdate ++ selector: ++ matchLabels: ++ app: csi-azuredisk-node-win ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/instance: "azuredisk-csi-driver" ++ app.kubernetes.io/managed-by: "Helm" ++ app.kubernetes.io/name: "azuredisk-csi-driver" ++ app.kubernetes.io/version: "v1.23.0" ++ helm.sh/chart: "azuredisk-csi-driver-v1.23.0" ++ app: csi-azuredisk-node-win ++ spec: ++ serviceAccountName: csi-azuredisk-node-sa ++ tolerations: ++ - effect: NoSchedule ++ key: node.kubernetes.io/os ++ operator: Exists ++ nodeSelector: ++ kubernetes.io/os: windows ++ affinity: ++ nodeAffinity: ++ requiredDuringSchedulingIgnoredDuringExecution: ++ nodeSelectorTerms: ++ - matchExpressions: ++ - key: type ++ operator: NotIn ++ values: ++ - virtual-kubelet ++ priorityClassName: system-node-critical ++ containers: ++ - name: liveness-probe ++ volumeMounts: ++ - mountPath: C:\csi ++ name: plugin-dir ++ image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.7.0" ++ args: ++ - "--csi-address=$(CSI_ENDPOINT)" ++ - "--probe-timeout=3s" ++ - "--health-port=29603" ++ - "--v=2" ++ env: ++ - name: CSI_ENDPOINT ++ value: unix://C:\\csi\\csi.sock ++ imagePullPolicy: IfNotPresent ++ resources: ++ limits: ++ memory: 150Mi ++ requests: ++ cpu: 10m ++ memory: 40Mi ++ - name: node-driver-registrar ++ image: "mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.5.1" ++ args: ++ - "--v=2" ++ - "--csi-address=$(CSI_ENDPOINT)" ++ - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" ++ livenessProbe: ++ exec: ++ command: ++ - /csi-node-driver-registrar.exe ++ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) ++ - --mode=kubelet-registration-probe ++ initialDelaySeconds: 60 ++ timeoutSeconds: 30 ++ env: ++ - name: CSI_ENDPOINT ++ value: unix://C:\\csi\\csi.sock ++ - name: DRIVER_REG_SOCK_PATH ++ value: C:\\var\\lib\\kubelet\\plugins\\disk.csi.azure.com\\csi.sock ++ - name: KUBE_NODE_NAME ++ valueFrom: ++ fieldRef: ++ fieldPath: spec.nodeName ++ imagePullPolicy: IfNotPresent ++ volumeMounts: ++ - name: kubelet-dir ++ mountPath: "C:\\var\\lib\\kubelet" ++ - name: plugin-dir ++ mountPath: C:\csi ++ - name: registration-dir ++ mountPath: C:\registration ++ resources: ++ limits: ++ memory: 150Mi ++ requests: ++ cpu: 30m ++ memory: 40Mi ++ - name: azuredisk ++ image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.23.0" ++ args: ++ - "--v=5" ++ - "--endpoint=$(CSI_ENDPOINT)" ++ - "--nodeid=$(KUBE_NODE_NAME)" ++ - 
"--metrics-address=0.0.0.0:29605" ++ - "--drivername=disk.csi.azure.com" ++ - "--volume-attach-limit=-1" ++ - "--cloud-config-secret-name=azure-cloud-provider" ++ - "--cloud-config-secret-namespace=kube-system" ++ - "--custom-user-agent=" ++ - "--user-agent-suffix=OSS-helm" ++ - "--allow-empty-cloud-config=true" ++ - "--support-zone=true" ++ - "--get-node-info-from-labels=false" ++ ports: ++ - containerPort: 29603 ++ name: healthz ++ protocol: TCP ++ livenessProbe: ++ failureThreshold: 5 ++ httpGet: ++ path: /healthz ++ port: healthz ++ initialDelaySeconds: 30 ++ timeoutSeconds: 10 ++ periodSeconds: 30 ++ env: ++ - name: AZURE_CREDENTIAL_FILE ++ valueFrom: ++ configMapKeyRef: ++ name: azure-cred-file ++ key: path-windows ++ optional: true ++ - name: CSI_ENDPOINT ++ value: unix://C:\\csi\\csi.sock ++ - name: KUBE_NODE_NAME ++ valueFrom: ++ fieldRef: ++ apiVersion: v1 ++ fieldPath: spec.nodeName ++ - name: AZURE_GO_SDK_LOG_LEVEL ++ value: ++ imagePullPolicy: IfNotPresent ++ volumeMounts: ++ - name: kubelet-dir ++ mountPath: "C:\\var\\lib\\kubelet" ++ - name: plugin-dir ++ mountPath: C:\csi ++ - name: azure-config ++ mountPath: C:\k ++ - name: csi-proxy-fs-pipe-v1 ++ mountPath: \\.\pipe\csi-proxy-filesystem-v1 ++ - name: csi-proxy-disk-pipe-v1 ++ mountPath: \\.\pipe\csi-proxy-disk-v1 ++ - name: csi-proxy-volume-pipe-v1 ++ mountPath: \\.\pipe\csi-proxy-volume-v1 ++ # these paths are still included for compatibility, they're used ++ # only if the node has still the beta version of the CSI proxy ++ - name: csi-proxy-fs-pipe-v1beta1 ++ mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1 ++ - name: csi-proxy-disk-pipe-v1beta2 ++ mountPath: \\.\pipe\csi-proxy-disk-v1beta2 ++ - name: csi-proxy-volume-pipe-v1beta2 ++ mountPath: \\.\pipe\csi-proxy-volume-v1beta2 ++ resources: ++ limits: ++ memory: 200Mi ++ requests: ++ cpu: 10m ++ memory: 40Mi ++ volumes: ++ - name: csi-proxy-fs-pipe-v1 ++ hostPath: ++ path: \\.\pipe\csi-proxy-filesystem-v1 ++ - name: csi-proxy-disk-pipe-v1 ++ hostPath: ++ path: \\.\pipe\csi-proxy-disk-v1 ++ - name: csi-proxy-volume-pipe-v1 ++ hostPath: ++ path: \\.\pipe\csi-proxy-volume-v1 ++ # these paths are still included for compatibility, they're used ++ # only if the node has still the beta version of the CSI proxy ++ - name: csi-proxy-fs-pipe-v1beta1 ++ hostPath: ++ path: \\.\pipe\csi-proxy-filesystem-v1beta1 ++ - name: csi-proxy-disk-pipe-v1beta2 ++ hostPath: ++ path: \\.\pipe\csi-proxy-disk-v1beta2 ++ - name: csi-proxy-volume-pipe-v1beta2 ++ hostPath: ++ path: \\.\pipe\csi-proxy-volume-v1beta2 ++ - name: registration-dir ++ hostPath: ++ path: C:\var\lib\kubelet\plugins_registry\ ++ type: Directory ++ - name: kubelet-dir ++ hostPath: ++ path: C:\var\lib\kubelet\ ++ type: Directory ++ - name: plugin-dir ++ hostPath: ++ path: C:\var\lib\kubelet\plugins\disk.csi.azure.com\ ++ type: DirectoryOrCreate ++ - name: azure-config ++ hostPath: ++ path: C:\k ++ type: DirectoryOrCreate +helmfile-tests, disk.csi.azure.com, CSIDriver (storage.k8s.io) has been added: +- ++ # Source: azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml ++ apiVersion: storage.k8s.io/v1 ++ kind: CSIDriver ++ metadata: ++ name: disk.csi.azure.com ++ annotations: ++ csiDriver: "v1.23.0" ++ snapshot: "v5.0.1" ++ spec: ++ attachRequired: true ++ podInfoOnMount: false ++ fsGroupPolicy: File +Comparing release=azuredisk-csi-driver, chart=azuredisk-csi-driver/azuredisk-csi-driver diff --git a/test/integration/chart-needs-golden/lint-live b/test/integration/chart-needs-golden/lint-live new file mode 100644 index 
00000000..e0fa652c --- /dev/null +++ b/test/integration/chart-needs-golden/lint-live @@ -0,0 +1,8 @@ +"azuredisk-csi-driver" has been added to your repositories +[INFO] Chart.yaml: icon is recommended +[INFO] values.yaml: file does not exist + +1 chart(s) linted, 0 chart(s) failed +[INFO] Chart.yaml: icon is recommended + +1 chart(s) linted, 0 chart(s) failed diff --git a/test/integration/chart-needs-golden/template b/test/integration/chart-needs-golden/template index 90d3c760..5703f036 100644 --- a/test/integration/chart-needs-golden/template +++ b/test/integration/chart-needs-golden/template @@ -5,6 +5,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: managed-csi + namespace: helmfile-tests provisioner: disk.csi.azure.com parameters: skuName: Premium_LRS @@ -18,7 +19,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests labels: app.kubernetes.io/instance: "azuredisk-csi-driver" app.kubernetes.io/managed-by: "Helm" @@ -31,7 +32,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: csi-azuredisk-node-sa - namespace: default + namespace: helmfile-tests labels: app.kubernetes.io/instance: "azuredisk-csi-driver" app.kubernetes.io/managed-by: "Helm" @@ -209,7 +210,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: azuredisk-external-provisioner-role @@ -229,7 +230,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: azuredisk-external-attacher-role @@ -249,7 +250,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: azuredisk-external-snapshotter-role @@ -269,7 +270,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: azuredisk-external-resizer-role @@ -283,7 +284,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: csi-azuredisk-controller-secret-role @@ -297,7 +298,7 @@ metadata: subjects: - kind: ServiceAccount name: csi-azuredisk-node-sa - namespace: default + namespace: helmfile-tests roleRef: kind: ClusterRole name: csi-azuredisk-node-role @@ -308,7 +309,7 @@ kind: DaemonSet apiVersion: apps/v1 metadata: name: csi-azuredisk-node-win - namespace: default + namespace: helmfile-tests labels: app.kubernetes.io/instance: "azuredisk-csi-driver" app.kubernetes.io/managed-by: "Helm" @@ -418,7 +419,7 @@ spec: - "--drivername=disk.csi.azure.com" - "--volume-attach-limit=-1" - "--cloud-config-secret-name=azure-cloud-provider" - - "--cloud-config-secret-namespace=kube-system" + - "--cloud-config-secret-namespace=helmfile-tests" - "--custom-user-agent=" - "--user-agent-suffix=OSS-helm" - "--allow-empty-cloud-config=true" @@ -523,7 +524,7 @@ kind: DaemonSet apiVersion: apps/v1 metadata: name: csi-azuredisk-node - namespace: default + namespace: helmfile-tests labels: app.kubernetes.io/instance: "azuredisk-csi-driver" app.kubernetes.io/managed-by: "Helm" @@ -623,7 +624,7 @@ spec: - "--drivername=disk.csi.azure.com" - "--volume-attach-limit=-1" - "--cloud-config-secret-name=azure-cloud-provider" - - "--cloud-config-secret-namespace=kube-system" + - 
"--cloud-config-secret-namespace=helmfile-tests" - "--custom-user-agent=" - "--user-agent-suffix=OSS-helm" - "--allow-empty-cloud-config=true" @@ -715,7 +716,7 @@ kind: Deployment apiVersion: apps/v1 metadata: name: csi-azuredisk-controller - namespace: default + namespace: helmfile-tests labels: app.kubernetes.io/instance: "azuredisk-csi-driver" app.kubernetes.io/managed-by: "Helm" @@ -761,7 +762,7 @@ spec: - "--v=2" - "--timeout=15s" - "--leader-election" - - "--leader-election-namespace=default" + - "--leader-election-namespace=helmfile-tests" - "--worker-threads=50" - "--extra-create-metadata=true" - "--strict-topology=true" @@ -784,7 +785,7 @@ spec: - "-csi-address=$(ADDRESS)" - "-timeout=1200s" - "-leader-election" - - "--leader-election-namespace=default" + - "--leader-election-namespace=helmfile-tests" - "-worker-threads=500" env: - name: ADDRESS @@ -803,7 +804,7 @@ spec: args: - "-csi-address=$(ADDRESS)" - "-leader-election" - - "--leader-election-namespace=default" + - "--leader-election-namespace=helmfile-tests" - "-v=2" env: - name: ADDRESS @@ -823,7 +824,7 @@ spec: - "-csi-address=$(ADDRESS)" - "-v=2" - "-leader-election" - - "--leader-election-namespace=default" + - "--leader-election-namespace=helmfile-tests" - '-handle-volume-inuse-error=false' - '-feature-gates=RecoverVolumeExpansionFailure=true' - "-timeout=240s" @@ -865,7 +866,7 @@ spec: - "--vm-type=" - "--drivername=disk.csi.azure.com" - "--cloud-config-secret-name=azure-cloud-provider" - - "--cloud-config-secret-namespace=kube-system" + - "--cloud-config-secret-namespace=helmfile-tests" - "--custom-user-agent=" - "--user-agent-suffix=OSS-helm" - "--allow-empty-cloud-config=false" diff --git a/test/integration/run.sh b/test/integration/run.sh index b56e1fd0..74783e8a 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -16,9 +16,8 @@ if [[ ! 
-d "${dir}" ]]; then dir="${PWD}"; fi # GLOBALS ----------------------------------------------------------------------------------------------------------- -test_ns="helmfile-tests-$(date +"%Y%m%d-%H%M%S")" +test_ns="helmfile-tests" helmfile="./helmfile ${EXTRA_HELMFILE_FLAGS} --namespace=${test_ns}" -helmfile_no_extra_flags="./helmfile --namespace=${test_ns}" helm="helm --kube-context=minikube" kubectl="kubectl --context=minikube --namespace=${test_ns}" helm_dir="${PWD}/${dir}/.helm" diff --git a/test/integration/templates-golden/v2/helmx/patched_resources.yaml b/test/integration/templates-golden/v2/helmx/patched_resources.yaml index 6af9f074..c245f239 100644 --- a/test/integration/templates-golden/v2/helmx/patched_resources.yaml +++ b/test/integration/templates-golden/v2/helmx/patched_resources.yaml @@ -2,7 +2,7 @@ # Source: helmx/templates/helmx.all.yaml apiVersion: v1 data: - namespace: helmx-system + namespace: helmfile-tests kind: ConfigMap metadata: name: release-namespace diff --git a/test/integration/templates-golden/v3/helmx/patched_resources.yaml b/test/integration/templates-golden/v3/helmx/patched_resources.yaml index f036ace6..27475dc1 100644 --- a/test/integration/templates-golden/v3/helmx/patched_resources.yaml +++ b/test/integration/templates-golden/v3/helmx/patched_resources.yaml @@ -2,7 +2,7 @@ # Source: helmx/templates/helmx.all.yaml apiVersion: v1 data: - namespace: helmx-system + namespace: helmfile-tests kind: ConfigMap metadata: name: release-namespace diff --git a/test/integration/test-cases/chart-needs.sh b/test/integration/test-cases/chart-needs.sh index 66039a13..49fd36d8 100644 --- a/test/integration/test-cases/chart-needs.sh +++ b/test/integration/test-cases/chart-needs.sh @@ -5,6 +5,13 @@ if [[ helm_major_version -eq 3 ]]; then chart_needs_lint_reverse=${chart_needs_tmp}/chart.needs.lint.log chart_needs_diff_reverse=${chart_needs_tmp}/chart.needs.diff.log + lint_out_file=${chart_needs_golden_dir}/lint + diff_out_file=${chart_needs_golden_dir}/diff + if [[ $EXTRA_HELMFILE_FLAGS == *--enable-live-output* ]]; then + lint_out_file=${chart_needs_golden_dir}/lint-live + diff_out_file=${chart_needs_golden_dir}/diff-live + fi + test_start "chart prepare when helmfile template with needs" info "https://github.com/helmfile/helmfile/issues/455" @@ -18,15 +25,15 @@ if [[ helm_major_version -eq 3 ]]; then for i in $(seq 10); do info "Comparing lint/chart-needs #$i" - ${helmfile_no_extra_flags} -f ${dir}/issue.455/helmfile.yaml lint --include-needs | grep -v Linting > ${chart_needs_lint_reverse} || fail "\"helmfile lint\" shouldn't fail" - diff -u ${chart_needs_golden_dir}/lint ${chart_needs_lint_reverse} || fail "\"helmfile lint\" should be consistent" + ${helmfile} -f ${dir}/issue.455/helmfile.yaml lint --include-needs | grep -v Linting > ${chart_needs_lint_reverse} || fail "\"helmfile lint\" shouldn't fail" + diff -u ${lint_out_file} ${chart_needs_lint_reverse} || fail "\"helmfile lint\" should be consistent" echo code=$? 
done for i in $(seq 10); do info "Comparing diff/chart-needs #$i" - ${helmfile_no_extra_flags} -f ${dir}/issue.455/helmfile.yaml diff --include-needs | grep -Ev "Comparing release=azuredisk-csi-storageclass, chart=/tmp/[0-9a-zA-Z]+/azuredisk-csi-storageclass" | grep -v "$test_ns" > ${chart_needs_diff_reverse} || fail "\"helmfile diff\" shouldn't fail" - diff -u ${chart_needs_golden_dir}/diff ${chart_needs_diff_reverse} || fail "\"helmfile diff\" should be consistent" + ${helmfile} -f ${dir}/issue.455/helmfile.yaml diff --include-needs | grep -Ev "Comparing release=azuredisk-csi-storageclass, chart=/tmp/.*/azuredisk-csi-storageclass" > ${chart_needs_diff_reverse} || fail "\"helmfile diff\" shouldn't fail" + diff -u ${diff_out_file} ${chart_needs_diff_reverse} || fail "\"helmfile diff\" should be consistent" echo code=$? done