feat: `helmBinary` in helmfile.yaml (#1160)

* feat: `helmBinary` in helmfile.yaml

Resolves #1083

* Add regression test for `helmfile destroy`
KUOKA Yusuke 2020-03-29 17:51:07 +09:00 committed by GitHub
parent f676c61425
commit 69feadc360
11 changed files with 1063 additions and 346 deletions
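
For context, the new top-level `helmBinary` field lets a state file pick the helm executable it is run with, instead of relying only on the global `--helm-binary` flag (whose default is `helm`). A minimal, hypothetical helmfile.yaml sketch based on the override logic in this commit (the binary path, release name, and chart are illustrative):

```yaml
# Use this helm executable for every release in this state file.
# When omitted, the --helm-binary flag (default "helm") is used instead;
# passing a non-default --helm-binary still takes precedence over this field.
helmBinary: /usr/local/bin/helm3

releases:
- name: myapp          # hypothetical release
  chart: stable/myapp  # hypothetical chart
```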


@ -50,7 +50,7 @@ func main() {
cli.StringFlag{
Name: "helm-binary, b",
Usage: "path to helm binary",
Value: "helm",
Value: app.DefaultHelmBinary,
},
cli.StringFlag{
Name: "file, f",


@ -29,9 +29,10 @@ const (
)
type App struct {
KubeContext string
OverrideKubeContext string
OverrideHelmBinary string
Logger *zap.SugaredLogger
Reverse bool
Env string
Namespace string
Selectors []string
@ -53,14 +54,16 @@ type App struct {
remote *remote.Remote
helmExecer helmexec.Interface
valsRuntime vals.Evaluator
helms map[helmKey]helmexec.Interface
helmsMutex sync.Mutex
}
func New(conf ConfigProvider) *App {
return Init(&App{
KubeContext: conf.KubeContext(),
OverrideKubeContext: conf.KubeContext(),
OverrideHelmBinary: conf.HelmBinary(),
Logger: conf.Logger(),
Env: conf.Env(),
Namespace: conf.Namespace(),
@ -69,9 +72,9 @@ func New(conf ConfigProvider) *App {
FileOrDir: conf.FileOrDir(),
ValuesFiles: conf.StateValuesFiles(),
Set: conf.StateValuesSet(),
helmExecer: helmexec.New(conf.HelmBinary(), conf.Logger(), conf.KubeContext(), &helmexec.ShellRunner{
Logger: conf.Logger(),
}),
//helmExecer: helmexec.New(conf.HelmBinary(), conf.Logger(), conf.KubeContext(), &helmexec.ShellRunner{
// Logger: conf.Logger(),
//}),
})
}
@ -106,12 +109,6 @@ func (a *App) Repos(c ReposConfigProvider) error {
})
}
func (a *App) reverse() *App {
new := *a
new.Reverse = true
return &new
}
func (a *App) DeprecatedSyncCharts(c DeprecatedChartsConfigProvider) error {
return a.ForEachStateFiltered(func(run *Run) []error {
return run.DeprecatedSyncCharts(c)
@ -147,6 +144,10 @@ func (a *App) Apply(c ApplyConfigProvider) error {
mut := &sync.Mutex{}
var opts []LoadOption
opts = append(opts, SetRetainValuesFiles(c.RetainValuesFiles()))
err := a.ForEachState(func(run *Run) (bool, []error) {
matched, updated, errs := a.apply(run, c)
@ -155,7 +156,7 @@ func (a *App) Apply(c ApplyConfigProvider) error {
mut.Unlock()
return matched, errs
}, c.RetainValuesFiles())
}, opts...)
if err != nil {
return err
@ -177,36 +178,36 @@ func (a *App) Status(c StatusesConfigProvider) error {
}
func (a *App) Delete(c DeleteConfigProvider) error {
return a.reverse().ForEachState(func(run *Run) (bool, []error) {
return a.ForEachState(func(run *Run) (bool, []error) {
return a.delete(run, c.Purge(), c)
})
}, SetReverse(true))
}
func (a *App) Destroy(c DestroyConfigProvider) error {
return a.reverse().ForEachState(func(run *Run) (bool, []error) {
return a.ForEachState(func(run *Run) (bool, []error) {
return a.delete(run, true, c)
})
}, SetReverse(true))
}
func (a *App) Test(c TestConfigProvider) error {
if c.Cleanup() && a.helmExecer.IsHelm3() {
return a.ForEachStateFiltered(func(run *Run) []error {
if c.Cleanup() && run.helm.IsHelm3() {
a.Logger.Warnf("warn: requested cleanup will not be applied. " +
"To clean up test resources with Helm 3, you have to remove them manually " +
"or set helm.sh/hook-delete-policy\n")
}
return a.ForEachStateFiltered(func(run *Run) []error {
return run.Test(c)
})
}
func (a *App) PrintState(c StateConfigProvider) error {
return a.ForEachStateFiltered(func(run *Run) []error {
state, err := run.state.ToYaml()
return a.VisitDesiredStatesWithReleasesFiltered(a.FileOrDir, func(st *state.HelmState) []error {
state, err := st.ToYaml()
if err != nil {
return []error{err}
}
fmt.Printf("---\n# Source: %s\n\n%+v", run.state.FilePath, state)
fmt.Printf("---\n# Source: %s\n\n%+v", st.FilePath, state)
return []error{}
})
}
@ -215,9 +216,9 @@ func (a *App) ListReleases(c StateConfigProvider) error {
table := uitable.New()
table.AddRow("NAME", "NAMESPACE", "INSTALLED", "LABELS")
err := a.ForEachStateFiltered(func(run *Run) []error {
err := a.VisitDesiredStatesWithReleasesFiltered(a.FileOrDir, func(st *state.HelmState) []error {
//var releases m
for _, r := range run.state.Releases {
for _, r := range st.Releases {
labels := ""
for k, v := range r.Labels {
labels = fmt.Sprintf("%s,%s:%s", labels, k, v)
@ -266,8 +267,8 @@ func (a *App) within(dir string, do func() error) error {
return appErr
}
func (a *App) visitStateFiles(fileOrDir string, do func(string, string) error) error {
desiredStateFiles, err := a.findDesiredStateFiles(fileOrDir)
func (a *App) visitStateFiles(fileOrDir string, opts LoadOpts, do func(string, string) error) error {
desiredStateFiles, err := a.findDesiredStateFiles(fileOrDir, opts)
if err != nil {
return appError("", err)
}
@ -302,6 +303,11 @@ func (a *App) visitStateFiles(fileOrDir string, do func(string, string) error) e
}
func (a *App) loadDesiredStateFromYaml(file string, opts ...LoadOpts) (*state.HelmState, error) {
var op LoadOpts
if len(opts) > 0 {
op = opts[0]
}
ld := &desiredStateLoader{
readFile: a.readFile,
fileExists: a.fileExists,
@ -310,25 +316,59 @@ func (a *App) loadDesiredStateFromYaml(file string, opts ...LoadOpts) (*state.He
logger: a.Logger,
abs: a.abs,
Reverse: a.Reverse,
KubeContext: a.KubeContext,
overrideKubeContext: a.OverrideKubeContext,
overrideHelmBinary: a.OverrideHelmBinary,
glob: a.glob,
helm: a.helmExecer,
getHelm: a.getHelm,
valsRuntime: a.valsRuntime,
}
var op LoadOpts
if len(opts) > 0 {
op = opts[0]
}
return ld.Load(file, op)
}
func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*state.HelmState, helmexec.Interface) (bool, []error)) error {
type helmKey struct {
Binary string
Context string
}
func createHelmKey(bin, kubectx string) helmKey {
return helmKey{
Binary: bin,
Context: kubectx,
}
}
// getHelm returns the helm exec instance for the specified state, which is used for helmfile-wide operations
// like decrypting environment secrets.
//
// This is currently used for running all the helm commands for reconciling releases. But this may change in the future
// once we enable each release to have its own helm binary/version.
func (a *App) getHelm(st *state.HelmState) helmexec.Interface {
a.helmsMutex.Lock()
defer a.helmsMutex.Unlock()
if a.helms == nil {
a.helms = map[helmKey]helmexec.Interface{}
}
bin := st.DefaultHelmBinary
kubectx := st.HelmDefaults.KubeContext
key := createHelmKey(bin, kubectx)
if _, ok := a.helms[key]; !ok {
a.helms[key] = helmexec.New(bin, a.Logger, kubectx, &helmexec.ShellRunner{
Logger: a.Logger,
})
}
return a.helms[key]
}
func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*state.HelmState) (bool, []error)) error {
noMatchInHelmfiles := true
err := a.visitStateFiles(fileOrDir, func(f, d string) error {
err := a.visitStateFiles(fileOrDir, defOpts, func(f, d string) error {
opts := defOpts.DeepCopy()
if opts.CalleePath == "" {
@ -355,8 +395,6 @@ func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*sta
ctx := context{app: a, st: st, retainValues: defOpts.RetainValuesFiles}
helm := a.helmExecer
if err != nil {
switch stateLoadErr := err.(type) {
// Addresses https://github.com/roboll/helmfile/issues/279
@ -379,6 +417,8 @@ func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*sta
optsForNestedState := LoadOpts{
CalleePath: filepath.Join(d, f),
Environment: m.Environment,
Reverse: defOpts.Reverse,
RetainValuesFiles: defOpts.RetainValuesFiles,
}
//assign parent selector to sub helm selector in legacy mode or do not inherit in experimental mode
if (m.Selectors == nil && !isExplicitSelectorInheritanceEnabled()) || m.SelectorsInherited {
@ -406,7 +446,7 @@ func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*sta
return appError(fmt.Sprintf("failed executing release templates in \"%s\"", f), tmplErr)
}
processed, errs := converge(templated, helm)
processed, errs := converge(templated)
noMatchInHelmfiles = noMatchInHelmfiles && !processed
return context{app: a, st: templated, retainValues: defOpts.RetainValuesFiles}.clean(errs)
@ -425,7 +465,10 @@ func (a *App) visitStates(fileOrDir string, defOpts LoadOpts, converge func(*sta
func (a *App) ForEachStateFiltered(do func(*Run) []error) error {
ctx := NewContext()
err := a.VisitDesiredStatesWithReleasesFiltered(a.FileOrDir, func(st *state.HelmState, helm helmexec.Interface) []error {
err := a.VisitDesiredStatesWithReleasesFiltered(a.FileOrDir, func(st *state.HelmState) []error {
helm := a.getHelm(st)
run := NewRun(st, helm, ctx)
return do(run)
@ -434,12 +477,30 @@ func (a *App) ForEachStateFiltered(do func(*Run) []error) error {
return err
}
func (a *App) ForEachState(do func(*Run) (bool, []error), retainValues ...bool) error {
type LoadOption func(o *LoadOpts)
var (
SetReverse = func(r bool) func(o *LoadOpts) {
return func(o *LoadOpts) {
o.Reverse = r
}
}
SetRetainValuesFiles = func(r bool) func(o *LoadOpts) {
return func(o *LoadOpts) {
o.RetainValuesFiles = true
}
}
)
func (a *App) ForEachState(do func(*Run) (bool, []error), o ...LoadOption) error {
ctx := NewContext()
err := a.visitStatesWithSelectorsAndRemoteSupport(a.FileOrDir, func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
err := a.visitStatesWithSelectorsAndRemoteSupport(a.FileOrDir, func(st *state.HelmState) (bool, []error) {
helm := a.getHelm(st)
run := NewRun(st, helm, ctx)
return do(run)
}, retainValues...)
}, o...)
return err
}
@ -511,13 +572,13 @@ type Opts struct {
DAGEnabled bool
}
func (a *App) visitStatesWithSelectorsAndRemoteSupport(fileOrDir string, converge func(*state.HelmState, helmexec.Interface) (bool, []error), retainValues ...bool) error {
func (a *App) visitStatesWithSelectorsAndRemoteSupport(fileOrDir string, converge func(*state.HelmState) (bool, []error), opt ...LoadOption) error {
opts := LoadOpts{
Selectors: a.Selectors,
}
if len(retainValues) > 0 {
opts.RetainValuesFiles = retainValues[0]
for _, o := range opt {
o(&opts)
}
envvals := []interface{}{}
@ -557,8 +618,7 @@ func (a *App) visitStatesWithSelectorsAndRemoteSupport(fileOrDir string, converg
return a.visitStates(fileOrDir, opts, converge)
}
func (a *App) Wrap(converge func(*state.HelmState, helmexec.Interface) []error) func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
return func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
func processFilteredReleases(st *state.HelmState, converge func(st *state.HelmState) []error) (bool, []error) {
if len(st.Selectors) > 0 {
err := st.FilterReleases()
if err != nil {
@ -584,39 +644,28 @@ func (a *App) Wrap(converge func(*state.HelmState, helmexec.Interface) []error)
}
}
errs := converge(st, helm)
errs := converge(st)
processed := len(st.Releases) != 0 && len(errs) == 0
return processed, errs
}
}
func (a *App) VisitDesiredStatesWithReleasesFiltered(fileOrDir string, converge func(*state.HelmState, helmexec.Interface) []error) error {
f := a.Wrap(converge)
return a.visitStatesWithSelectorsAndRemoteSupport(fileOrDir, func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
return f(st, helm)
func (a *App) Wrap(converge func(*state.HelmState, helmexec.Interface) []error) func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
return func(st *state.HelmState, helm helmexec.Interface) (bool, []error) {
return processFilteredReleases(st, func(st *state.HelmState) []error {
return converge(st, helm)
})
}
func (a *App) findStateFilesInAbsPaths(specifiedPath string) ([]string, error) {
rels, err := a.findDesiredStateFiles(specifiedPath)
if err != nil {
return rels, err
}
files := make([]string, len(rels))
for i := range rels {
files[i], err = filepath.Abs(rels[i])
if err != nil {
return []string{}, err
}
}
return files, nil
func (a *App) VisitDesiredStatesWithReleasesFiltered(fileOrDir string, converge func(*state.HelmState) []error, o ...LoadOption) error {
return a.visitStatesWithSelectorsAndRemoteSupport(fileOrDir, func(st *state.HelmState) (bool, []error) {
return processFilteredReleases(st, converge)
}, o...)
}
func (a *App) findDesiredStateFiles(specifiedPath string) ([]string, error) {
func (a *App) findDesiredStateFiles(specifiedPath string, opts LoadOpts) ([]string, error) {
path, err := a.remote.Locate(specifiedPath)
if err != nil {
return nil, fmt.Errorf("locate: %v", err)
@ -666,7 +715,7 @@ func (a *App) findDesiredStateFiles(specifiedPath string) ([]string, error) {
if err != nil {
return []string{}, err
}
if a.Reverse {
if opts.Reverse {
sort.Slice(files, func(i, j int) bool {
return files[j] < files[i]
})


@ -44,6 +44,16 @@ func injectFs(app *App, fs *testhelper.TestFs) *App {
return app
}
func expectNoCallsToHelm(app *App) {
if app.helms != nil {
panic("invalid call to expectNoCallsToHelm")
}
app.helms = map[helmKey]helmexec.Interface{
createHelmKey(app.OverrideHelmBinary, app.OverrideKubeContext): &noCallHelmExec{},
}
}
func TestVisitDesiredStatesWithReleasesFiltered_ReleaseOrder(t *testing.T) {
files := map[string]string{
"/path/to/helmfile.yaml": `
@ -70,14 +80,18 @@ releases:
fs := testhelper.NewTestFs(files)
fs.GlobFixtures["/path/to/helmfile.d/a*.yaml"] = []string{"/path/to/helmfile.d/a2.yaml", "/path/to/helmfile.d/a1.yaml"}
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Env: "default",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
actualOrder := []string{}
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
actualOrder = append(actualOrder, st.FilePath)
return []error{}
}
@ -116,13 +130,17 @@ BAZ: 4
fs := testhelper.NewTestFs(files)
fs.GlobFixtures["/path/to/env.*.yaml"] = []string{"/path/to/env.2.yaml", "/path/to/env.1.yaml"}
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Env: "default",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -154,13 +172,17 @@ releases:
}
fs := testhelper.NewTestFs(files)
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Env: "default",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -195,13 +217,17 @@ releases:
}
fs := testhelper.NewTestFs(files)
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Env: "test",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -243,13 +269,17 @@ releases:
}
fs := testhelper.NewTestFs(files)
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Env: "default",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -305,14 +335,18 @@ releases:
fs := testhelper.NewTestFs(files)
fs.GlobFixtures["/path/to/helmfile.d/a*.yaml"] = []string{"/path/to/helmfile.d/a2.yaml", "/path/to/helmfile.d/a1.yaml"}
app := &App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Selectors: []string{fmt.Sprintf("name=%s", testcase.name)},
Namespace: "",
Env: "default",
}
expectNoCallsToHelm(app)
app = injectFs(app, fs)
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -346,7 +380,7 @@ releases:
chart: stable/zipkin
`,
}
noop := func(st *state.HelmState, helm helmexec.Interface) []error {
noop := func(st *state.HelmState) []error {
return []error{}
}
@ -361,12 +395,16 @@ releases:
for _, testcase := range testcases {
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Selectors: []string{},
Env: testcase.name,
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", noop,
)
@ -452,7 +490,7 @@ releases:
t.Run(testcase.label, func(t *testing.T) {
actual := []string{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r.Name)
}
@ -460,13 +498,16 @@ releases:
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(&ctxLogger{label: testcase.label}, "debug"),
Namespace: "",
Selectors: []string{testcase.label},
Env: "default",
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -688,7 +729,7 @@ func runFilterSubHelmFilesTests(testcases []struct {
for _, testcase := range testcases {
actual := []string{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r.Name)
}
@ -696,13 +737,16 @@ func runFilterSubHelmFilesTests(testcases []struct {
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Selectors: []string{testcase.label},
Env: "default",
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -777,16 +821,19 @@ tillerNs: INLINE_TILLER_NS_2
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Namespace: "",
Selectors: []string{},
Env: "default",
}, files)
expectNoCallsToHelm(app)
processed := []state.ReleaseSpec{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
processed = append(processed, r)
}
@ -875,22 +922,26 @@ releases:
for _, testcase := range testcases {
actual := []string{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r.Name)
}
return []error{}
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: testcase.reverse,
Namespace: "",
Selectors: []string{},
Env: "default",
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
SetReverse(testcase.reverse),
)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -932,22 +983,25 @@ bar: "bar1"
for _, testcase := range testcases {
actual := []string{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r.Name)
}
return []error{}
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: false,
Namespace: "",
Selectors: []string{},
Env: "default",
ValuesFiles: []string{"overrides.yaml"},
Set: map[string]interface{}{"bar": "bar2", "baz": "baz1"},
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -1049,22 +1103,25 @@ x:
actual := []state.ReleaseSpec{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r)
}
return []error{}
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: false,
Namespace: "",
Selectors: []string{},
Env: testcase.env,
ValuesFiles: []string{"overrides.yaml"},
Set: map[string]interface{}{"x": map[string]interface{}{"hoge": "hoge_set", "fuga": "fuga_set"}},
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -1098,20 +1155,23 @@ releases:
actual := []state.ReleaseSpec{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r)
}
return []error{}
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: false,
Namespace: "",
Env: "default",
Selectors: []string{},
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -1150,20 +1210,23 @@ releases:
actual := []state.ReleaseSpec{}
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
for _, r := range st.Releases {
actual = append(actual, r)
}
return []error{}
}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: false,
Namespace: "",
Selectors: []string{},
Env: "default",
}, files)
expectNoCallsToHelm(app)
err := app.VisitDesiredStatesWithReleasesFiltered(
"helmfile.yaml", collectReleases,
)
@ -1200,13 +1263,17 @@ func TestLoadDesiredStateFromYaml_DuplicateReleaseName(t *testing.T) {
return yamlContent, nil
}
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
readFile: readFile,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
_, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -1257,15 +1324,19 @@ helmDefaults:
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
readFile: testFs.ReadFile,
glob: testFs.Glob,
abs: testFs.Abs,
fileExistsAt: testFs.FileExistsAt,
fileExists: testFs.FileExists,
KubeContext: "default",
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Fatalf("unexpected error: %v", err)
@ -1342,6 +1413,7 @@ helmDefaults:
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
fileExists: testFs.FileExists,
glob: testFs.Glob,
@ -1349,6 +1421,9 @@ helmDefaults:
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -1416,6 +1491,7 @@ foo: FOO
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
fileExists: testFs.FileExists,
glob: testFs.Glob,
@ -1423,6 +1499,9 @@ foo: FOO
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -1477,6 +1556,7 @@ foo: FOO
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
fileExists: testFs.FileExists,
glob: testFs.Glob,
@ -1484,6 +1564,9 @@ foo: FOO
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -1556,6 +1639,7 @@ helmDefaults:
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
fileExists: testFs.FileExists,
glob: testFs.Glob,
@ -1563,6 +1647,9 @@ helmDefaults:
Env: "test",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile)
if err != nil {
t.Fatalf("unexpected error: %v", err)
@ -1627,14 +1714,17 @@ releases:
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
glob: testFs.Glob,
abs: testFs.Abs,
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: true,
}
st, err := app.loadDesiredStateFromYaml(yamlFile)
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(yamlFile, LoadOpts{Reverse: true})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -1681,14 +1771,17 @@ releases:
"/path/to/2.yaml": `bar: ["BAR"]`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
glob: testFs.Glob,
abs: testFs.Abs,
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: true,
}
st, err := app.loadDesiredStateFromYaml(statePath)
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(statePath, LoadOpts{Reverse: true})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -1734,19 +1827,23 @@ releases:
"/path/to/2.yaml": `bar: ["BAR"]`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
glob: testFs.Glob,
abs: testFs.Abs,
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: true,
}
opts := LoadOpts{
CalleePath: statePath,
Environment: state.SubhelmfileEnvironmentSpec{
OverrideValues: []interface{}{tc.overrideValues},
},
Reverse: true,
}
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(statePath, opts)
if err != nil {
@ -1842,14 +1939,17 @@ services:
`,
})
app := &App{
OverrideHelmBinary: DefaultHelmBinary,
readFile: testFs.ReadFile,
glob: testFs.Glob,
abs: testFs.Abs,
Env: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Reverse: true,
}
st, err := app.loadDesiredStateFromYaml(statePath)
expectNoCallsToHelm(app)
st, err := app.loadDesiredStateFromYaml(statePath, LoadOpts{Reverse: true})
if err != nil {
t.Fatalf("unexpected error at %d: %v", i, err)
@ -2084,15 +2184,19 @@ releases:
}
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
helmExecer: helm,
helms: map[helmKey]helmexec.Interface{
createHelmKey("helm", "default"): helm,
},
Namespace: "testNamespace",
valsRuntime: valsRuntime,
}, files)
app.Template(configImpl{set: []string{"foo=a", "bar=b"}})
for i := range wantReleases {
@ -2142,15 +2246,19 @@ releases:
}
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
helmExecer: helm,
helms: map[helmKey]helmexec.Interface{
createHelmKey("helm", "default"): helm,
},
Namespace: "testNamespace",
valsRuntime: valsRuntime,
}, files)
app.Template(configImpl{})
for i := range wantReleases {
@ -3484,12 +3592,15 @@ err: "foo" depends on nonexistent release "bar"
}
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
helmExecer: helm,
helms: map[helmKey]helmexec.Interface{
createHelmKey("helm", "default"): helm,
},
valsRuntime: valsRuntime,
}, tc.files)
@ -3601,13 +3712,15 @@ releases:
logger := helmexec.NewLogger(&buffer, "debug")
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
Namespace: "testNamespace",
}, files)
out := captureStdout(func() {
err := app.PrintState(configImpl{})
assert.NilError(t, err)
@ -3644,9 +3757,10 @@ releases:
logger := helmexec.NewLogger(&buffer, "debug")
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
Namespace: "testNamespace",
@ -3693,13 +3807,17 @@ releases:
logger := helmexec.NewLogger(&buffer, "debug")
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
KubeContext: "default",
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
Namespace: "testNamespace",
}, files)
expectNoCallsToHelm(app)
out := captureStdout(func() {
err := app.ListReleases(configImpl{})
assert.NilError(t, err)
@ -3740,13 +3858,14 @@ releases:
state.SetValue{Name: "name", Value: "val"}}
app := appWithFs(&App{
KubeContext: "default",
OverrideHelmBinary: DefaultHelmBinary,
OverrideKubeContext: "default",
Logger: helmexec.NewLogger(os.Stderr, "debug"),
Env: "default",
}, files)
var specs []state.ReleaseSpec
collectReleases := func(st *state.HelmState, helm helmexec.Interface) []error {
collectReleases := func(st *state.HelmState) []error {
specs = append(specs, st.Releases...)
return nil
}


@ -4,20 +4,24 @@ import (
"bytes"
"errors"
"fmt"
"github.com/roboll/helmfile/pkg/helmexec"
"path/filepath"
"sort"
"github.com/imdario/mergo"
"github.com/roboll/helmfile/pkg/environment"
"github.com/roboll/helmfile/pkg/helmexec"
"github.com/roboll/helmfile/pkg/state"
"github.com/variantdev/vals"
"go.uber.org/zap"
)
const (
DefaultHelmBinary = "helm"
)
type desiredStateLoader struct {
KubeContext string
Reverse bool
overrideKubeContext string
overrideHelmBinary string
env string
namespace string
@ -26,9 +30,9 @@ type desiredStateLoader struct {
fileExists func(string) (bool, error)
abs func(string) (string, error)
glob func(string) ([]string, error)
getHelm func(*state.HelmState) helmexec.Interface
logger *zap.SugaredLogger
helm helmexec.Interface
valsRuntime vals.Evaluator
}
@ -60,7 +64,7 @@ func (ld *desiredStateLoader) Load(f string, opts LoadOpts) (*state.HelmState, e
return nil, err
}
if ld.Reverse {
if opts.Reverse {
rev := func(i, j int) bool {
return j < i
}
@ -68,11 +72,15 @@ func (ld *desiredStateLoader) Load(f string, opts LoadOpts) (*state.HelmState, e
sort.Slice(st.Helmfiles, rev)
}
if ld.KubeContext != "" {
if ld.overrideKubeContext != "" {
if st.HelmDefaults.KubeContext != "" {
return nil, errors.New("err: Cannot use option --kube-context and set attribute helmDefaults.kubeContext.")
}
st.HelmDefaults.KubeContext = ld.KubeContext
st.HelmDefaults.KubeContext = ld.overrideKubeContext
}
if ld.overrideHelmBinary != DefaultHelmBinary || st.DefaultHelmBinary == "" {
st.DefaultHelmBinary = ld.overrideHelmBinary
}
if ld.namespace != "" {
@ -143,7 +151,7 @@ func (ld *desiredStateLoader) loadFileWithOverrides(inheritedEnv, overrodeEnv *e
}
func (a *desiredStateLoader) underlying() *state.StateCreator {
c := state.NewCreator(a.logger, a.readFile, a.fileExists, a.abs, a.glob, a.helm, a.valsRuntime)
c := state.NewCreator(a.logger, a.readFile, a.fileExists, a.abs, a.glob, a.valsRuntime, a.getHelm)
c.LoadFile = a.loadFile
return c
}

pkg/app/destroy_test.go (new file, 461 lines)

@ -0,0 +1,461 @@
package app
import (
"bufio"
"bytes"
"github.com/roboll/helmfile/pkg/exectest"
"github.com/roboll/helmfile/pkg/helmexec"
"github.com/roboll/helmfile/pkg/testhelper"
"github.com/variantdev/vals"
"go.uber.org/zap"
"io"
"path/filepath"
"sync"
"testing"
)
type destroyConfig struct {
args string
concurrency int
interactive bool
logger *zap.SugaredLogger
}
func (d destroyConfig) Args() string {
return d.args
}
func (d destroyConfig) Interactive() bool {
return d.interactive
}
func (d destroyConfig) Logger() *zap.SugaredLogger {
return d.logger
}
func (d destroyConfig) Concurrency() int {
return d.concurrency
}
func TestDestroy(t *testing.T) {
testcases := []struct {
name string
loc string
ns string
concurrency int
error string
files map[string]string
selectors []string
lists map[exectest.ListKey]string
diffs map[exectest.DiffKey]error
upgraded []exectest.Release
deleted []exectest.Release
log string
}{
//
// complex test cases for smoke testing
//
{
name: "smoke",
loc: location(),
files: map[string]string{
"/path/to/helmfile.yaml": `
releases:
- name: database
chart: charts/mysql
needs:
- logging
- name: frontend-v1
chart: charts/frontend
installed: false
needs:
- servicemesh
- logging
- backend-v1
- name: frontend-v2
chart: charts/frontend
needs:
- servicemesh
- logging
- backend-v2
- name: frontend-v3
chart: charts/frontend
needs:
- servicemesh
- logging
- backend-v2
- name: backend-v1
chart: charts/backend
installed: false
needs:
- servicemesh
- logging
- database
- anotherbackend
- name: backend-v2
chart: charts/backend
needs:
- servicemesh
- logging
- database
- anotherbackend
- name: anotherbackend
chart: charts/anotherbackend
needs:
- servicemesh
- logging
- database
- name: servicemesh
chart: charts/istio
needs:
- logging
- name: logging
chart: charts/fluent-bit
- name: front-proxy
chart: stable/envoy
`,
},
diffs: map[exectest.DiffKey]error{},
lists: map[exectest.ListKey]string{
exectest.ListKey{Filter: "^frontend-v1$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
`,
exectest.ListKey{Filter: "^frontend-v2$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
frontend-v2 4 Fri Nov 1 08:40:07 2019 DEPLOYED frontend-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^frontend-v3$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
frontend-v3 4 Fri Nov 1 08:40:07 2019 DEPLOYED frontend-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^backend-v1$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
`,
exectest.ListKey{Filter: "^backend-v2$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
backend-v2 4 Fri Nov 1 08:40:07 2019 DEPLOYED backend-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^logging$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
logging 4 Fri Nov 1 08:40:07 2019 DEPLOYED fluent-bit-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^front-proxy$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
front-proxy 4 Fri Nov 1 08:40:07 2019 DEPLOYED envoy-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^servicemesh$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
servicemesh 4 Fri Nov 1 08:40:07 2019 DEPLOYED istio-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^database$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
database 4 Fri Nov 1 08:40:07 2019 DEPLOYED mysql-3.1.0 3.1.0 default
`,
exectest.ListKey{Filter: "^anotherbackend$", Flags: "--kube-contextdefault--deployed--failed--pending"}: `NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
anotherbackend 4 Fri Nov 1 08:40:07 2019 DEPLOYED anotherbackend-3.1.0 3.1.0 default
`,
},
// Disable concurrency to avoid non-deterministic results
concurrency: 1,
upgraded: []exectest.Release{},
deleted: []exectest.Release{
{Name: "frontend-v3", Flags: []string{}},
{Name: "frontend-v2", Flags: []string{}},
{Name: "frontend-v1", Flags: []string{}},
{Name: "backend-v2", Flags: []string{}},
{Name: "backend-v1", Flags: []string{}},
{Name: "anotherbackend", Flags: []string{}},
{Name: "database", Flags: []string{}},
{Name: "servicemesh", Flags: []string{}},
{Name: "front-proxy", Flags: []string{}},
{Name: "logging", Flags: []string{}},
},
log: `processing file "helmfile.yaml" in directory "."
first-pass rendering starting for "helmfile.yaml.part.0": inherited=&{default map[] map[]}, overrode=<nil>
first-pass uses: &{default map[] map[]}
first-pass rendering output of "helmfile.yaml.part.0":
0:
1: releases:
2: - name: database
3: chart: charts/mysql
4: needs:
5: - logging
6: - name: frontend-v1
7: chart: charts/frontend
8: installed: false
9: needs:
10: - servicemesh
11: - logging
12: - backend-v1
13: - name: frontend-v2
14: chart: charts/frontend
15: needs:
16: - servicemesh
17: - logging
18: - backend-v2
19: - name: frontend-v3
20: chart: charts/frontend
21: needs:
22: - servicemesh
23: - logging
24: - backend-v2
25: - name: backend-v1
26: chart: charts/backend
27: installed: false
28: needs:
29: - servicemesh
30: - logging
31: - database
32: - anotherbackend
33: - name: backend-v2
34: chart: charts/backend
35: needs:
36: - servicemesh
37: - logging
38: - database
39: - anotherbackend
40: - name: anotherbackend
41: chart: charts/anotherbackend
42: needs:
43: - servicemesh
44: - logging
45: - database
46: - name: servicemesh
47: chart: charts/istio
48: needs:
49: - logging
50: - name: logging
51: chart: charts/fluent-bit
52: - name: front-proxy
53: chart: stable/envoy
54:
first-pass produced: &{default map[] map[]}
first-pass rendering result of "helmfile.yaml.part.0": {default map[] map[]}
vals:
map[]
defaultVals:[]
second-pass rendering result of "helmfile.yaml.part.0":
0:
1: releases:
2: - name: database
3: chart: charts/mysql
4: needs:
5: - logging
6: - name: frontend-v1
7: chart: charts/frontend
8: installed: false
9: needs:
10: - servicemesh
11: - logging
12: - backend-v1
13: - name: frontend-v2
14: chart: charts/frontend
15: needs:
16: - servicemesh
17: - logging
18: - backend-v2
19: - name: frontend-v3
20: chart: charts/frontend
21: needs:
22: - servicemesh
23: - logging
24: - backend-v2
25: - name: backend-v1
26: chart: charts/backend
27: installed: false
28: needs:
29: - servicemesh
30: - logging
31: - database
32: - anotherbackend
33: - name: backend-v2
34: chart: charts/backend
35: needs:
36: - servicemesh
37: - logging
38: - database
39: - anotherbackend
40: - name: anotherbackend
41: chart: charts/anotherbackend
42: needs:
43: - servicemesh
44: - logging
45: - database
46: - name: servicemesh
47: chart: charts/istio
48: needs:
49: - logging
50: - name: logging
51: chart: charts/fluent-bit
52: - name: front-proxy
53: chart: stable/envoy
54:
merged environment: &{default map[] map[]}
processing 5 groups of releases in this order:
GROUP RELEASES
1 frontend-v3, frontend-v2, frontend-v1
2 backend-v2, backend-v1
3 anotherbackend
4 database, servicemesh
5 front-proxy, logging
processing releases in group 1/5: frontend-v3, frontend-v2, frontend-v1
worker 1/1 started
release "frontend-v3" processed
release "frontend-v2" processed
release "frontend-v1" processed
worker 1/1 finished
processing releases in group 2/5: backend-v2, backend-v1
worker 1/1 started
release "backend-v2" processed
release "backend-v1" processed
worker 1/1 finished
processing releases in group 3/5: anotherbackend
worker 1/1 started
release "anotherbackend" processed
worker 1/1 finished
processing releases in group 4/5: database, servicemesh
worker 1/1 started
release "database" processed
release "servicemesh" processed
worker 1/1 finished
processing releases in group 5/5: front-proxy, logging
worker 1/1 started
release "front-proxy" processed
release "logging" processed
worker 1/1 finished
DELETED RELEASES:
NAME
frontend-v3
frontend-v2
frontend-v1
backend-v2
backend-v1
anotherbackend
database
servicemesh
front-proxy
logging
`,
},
}
for i := range testcases {
tc := testcases[i]
t.Run(tc.name, func(t *testing.T) {
wantUpgrades := tc.upgraded
wantDeletes := tc.deleted
var helm = &exectest.Helm{
FailOnUnexpectedList: true,
FailOnUnexpectedDiff: true,
Lists: tc.lists,
Diffs: tc.diffs,
DiffMutex: &sync.Mutex{},
ChartsMutex: &sync.Mutex{},
ReleasesMutex: &sync.Mutex{},
}
bs := &bytes.Buffer{}
func() {
logReader, logWriter := io.Pipe()
logFlushed := &sync.WaitGroup{}
// Ensure all the log is consumed into `bs` by calling `logWriter.Close()` followed by `logFlushed.Wait()`
logFlushed.Add(1)
go func() {
scanner := bufio.NewScanner(logReader)
for scanner.Scan() {
bs.Write(scanner.Bytes())
bs.WriteString("\n")
}
logFlushed.Done()
}()
defer func() {
// This is here to avoid a data race on the bytes buffer `bs` that captures the logs
if err := logWriter.Close(); err != nil {
panic(err)
}
logFlushed.Wait()
}()
logger := helmexec.NewLogger(logWriter, "debug")
valsRuntime, err := vals.New(vals.Options{CacheSize: 32})
if err != nil {
t.Errorf("unexpected error creating vals runtime: %v", err)
}
app := appWithFs(&App{
OverrideHelmBinary: DefaultHelmBinary,
glob: filepath.Glob,
abs: filepath.Abs,
OverrideKubeContext: "default",
Env: "default",
Logger: logger,
helms: map[helmKey]helmexec.Interface{
createHelmKey("helm", "default"): helm,
},
valsRuntime: valsRuntime,
}, tc.files)
if tc.ns != "" {
app.Namespace = tc.ns
}
if tc.selectors != nil {
app.Selectors = tc.selectors
}
destroyErr := app.Destroy(destroyConfig{
// If we check log output, concurrency must be 1; otherwise the test becomes non-deterministic.
concurrency: tc.concurrency,
logger: logger,
})
if tc.error == "" && destroyErr != nil {
t.Fatalf("unexpected error for data defined at %s: %v", tc.loc, destroyErr)
} else if tc.error != "" && destroyErr == nil {
t.Fatalf("expected error did not occur for data defined at %s", tc.loc)
} else if tc.error != "" && destroyErr != nil && tc.error != destroyErr.Error() {
t.Fatalf("invalid error: expected %q, got %q", tc.error, destroyErr.Error())
}
if len(wantUpgrades) > len(helm.Releases) {
t.Fatalf("insufficient number of upgrades: got %d, want %d", len(helm.Releases), len(wantUpgrades))
}
for relIdx := range wantUpgrades {
if wantUpgrades[relIdx].Name != helm.Releases[relIdx].Name {
t.Errorf("releases[%d].name: got %q, want %q", relIdx, helm.Releases[relIdx].Name, wantUpgrades[relIdx].Name)
}
for flagIdx := range wantUpgrades[relIdx].Flags {
if wantUpgrades[relIdx].Flags[flagIdx] != helm.Releases[relIdx].Flags[flagIdx] {
t.Errorf("releaes[%d].flags[%d]: got %v, want %v", relIdx, flagIdx, helm.Releases[relIdx].Flags[flagIdx], wantUpgrades[relIdx].Flags[flagIdx])
}
}
}
if len(wantDeletes) > len(helm.Deleted) {
t.Fatalf("insufficient number of deletes: got %d, want %d", len(helm.Deleted), len(wantDeletes))
}
for relIdx := range wantDeletes {
if wantDeletes[relIdx].Name != helm.Deleted[relIdx].Name {
t.Errorf("releases[%d].name: got %q, want %q", relIdx, helm.Deleted[relIdx].Name, wantDeletes[relIdx].Name)
}
for flagIdx := range wantDeletes[relIdx].Flags {
if wantDeletes[relIdx].Flags[flagIdx] != helm.Deleted[relIdx].Flags[flagIdx] {
t.Errorf("releaes[%d].flags[%d]: got %v, want %v", relIdx, flagIdx, helm.Deleted[relIdx].Flags[flagIdx], wantDeletes[relIdx].Flags[flagIdx])
}
}
}
}()
if tc.log != "" {
actual := bs.String()
diff, exists := testhelper.Diff(tc.log, actual, 3)
if exists {
t.Errorf("unexpected log for data defined %s:\nDIFF\n%s\nEOD", tc.loc, diff)
}
}
})
}
}


@ -13,6 +13,8 @@ type LoadOpts struct {
// CalleePath is the absolute path to the file being loaded
CalleePath string
Reverse bool
}
func (o LoadOpts) DeepCopy() LoadOpts {

pkg/app/mocks_test.go (new file, 84 lines)

@ -0,0 +1,84 @@
package app
import "github.com/roboll/helmfile/pkg/helmexec"
type noCallHelmExec struct {
}
func (helm *noCallHelmExec) doPanic() {
panic("unexpected call to helm")
}
func (helm *noCallHelmExec) TemplateRelease(name, chart string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) UpdateDeps(chart string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) BuildDeps(name, chart string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) SetExtraArgs(args ...string) {
helm.doPanic()
return
}
func (helm *noCallHelmExec) SetHelmBinary(bin string) {
helm.doPanic()
return
}
func (helm *noCallHelmExec) AddRepo(name, repository, cafile, certfile, keyfile, username, password string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) UpdateRepo() error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) SyncRelease(context helmexec.HelmContext, name, chart string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) DiffRelease(context helmexec.HelmContext, name, chart string, suppressDiff bool, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) ReleaseStatus(context helmexec.HelmContext, release string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) DeleteRelease(context helmexec.HelmContext, name string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) List(context helmexec.HelmContext, filter string, flags ...string) (string, error) {
helm.doPanic()
return "", nil
}
func (helm *noCallHelmExec) DecryptSecret(context helmexec.HelmContext, name string, flags ...string) (string, error) {
helm.doPanic()
return "", nil
}
func (helm *noCallHelmExec) TestRelease(context helmexec.HelmContext, name string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) Fetch(chart string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) Lint(name, chart string, flags ...string) error {
helm.doPanic()
return nil
}
func (helm *noCallHelmExec) IsHelm3() bool {
helm.doPanic()
return false
}


@ -71,7 +71,6 @@ func New(helmBinary string, logger *zap.SugaredLogger, kubeContext string, runne
runner: runner,
decryptedSecrets: make(map[string]*decryptedSecret),
}
}
func (helm *execer) SetExtraArgs(args ...string) {


@ -4,12 +4,12 @@ import (
"bytes"
"errors"
"fmt"
"github.com/roboll/helmfile/pkg/helmexec"
"io"
"os"
"github.com/imdario/mergo"
"github.com/roboll/helmfile/pkg/environment"
"github.com/roboll/helmfile/pkg/helmexec"
"github.com/roboll/helmfile/pkg/maputil"
"github.com/variantdev/vals"
"go.uber.org/zap"
@ -39,15 +39,16 @@ type StateCreator struct {
fileExists func(string) (bool, error)
abs func(string) (string, error)
glob func(string) ([]string, error)
helm helmexec.Interface
valsRuntime vals.Evaluator
Strict bool
LoadFile func(inheritedEnv *environment.Environment, baseDir, file string, evaluateBases bool) (*HelmState, error)
getHelm func(*HelmState) helmexec.Interface
}
func NewCreator(logger *zap.SugaredLogger, readFile func(string) ([]byte, error), fileExists func(string) (bool, error), abs func(string) (string, error), glob func(string) ([]string, error), helm helmexec.Interface, valsRuntime vals.Evaluator) *StateCreator {
func NewCreator(logger *zap.SugaredLogger, readFile func(string) ([]byte, error), fileExists func(string) (bool, error), abs func(string) (string, error), glob func(string) ([]string, error), valsRuntime vals.Evaluator, getHelm func(*HelmState) helmexec.Interface) *StateCreator {
return &StateCreator{
logger: logger,
readFile: readFile,
@ -55,8 +56,8 @@ func NewCreator(logger *zap.SugaredLogger, readFile func(string) ([]byte, error)
abs: abs,
glob: glob,
Strict: true,
helm: helm,
valsRuntime: valsRuntime,
getHelm: getHelm,
}
}
@ -66,7 +67,6 @@ func (c *StateCreator) Parse(content []byte, baseDir, file string) (*HelmState,
state.FilePath = file
state.basePath = baseDir
state.helm = c.helm
decoder := yaml.NewDecoder(bytes.NewReader(content))
if !c.Strict {
@ -119,7 +119,7 @@ func (c *StateCreator) Parse(content []byte, baseDir, file string) (*HelmState,
func (c *StateCreator) LoadEnvValues(target *HelmState, env string, ctxEnv *environment.Environment, failOnMissingEnv bool) (*HelmState, error) {
state := *target
e, err := state.loadEnvValues(env, failOnMissingEnv, ctxEnv, c.readFile, c.glob)
e, err := c.loadEnvValues(&state, env, failOnMissingEnv, ctxEnv, c.readFile, c.glob)
if err != nil {
return nil, &StateLoadError{fmt.Sprintf("failed to read %s", state.FilePath), err}
}
@ -183,7 +183,7 @@ func (c *StateCreator) loadBases(envValues *environment.Environment, st *HelmSta
return layers[0], nil
}
func (st *HelmState) loadEnvValues(name string, failOnMissingEnv bool, ctxEnv *environment.Environment, readFile func(string) ([]byte, error), glob func(string) ([]string, error)) (*environment.Environment, error) {
func (c *StateCreator) loadEnvValues(st *HelmState, name string, failOnMissingEnv bool, ctxEnv *environment.Environment, readFile func(string) ([]byte, error), glob func(string) ([]string, error)) (*environment.Environment, error) {
envVals := map[string]interface{}{}
envSpec, ok := st.Environments[name]
if ok {
@ -207,7 +207,7 @@ func (st *HelmState) loadEnvValues(name string, failOnMissingEnv bool, ctxEnv *e
envSecretFiles = append(envSecretFiles, resolved...)
}
if err = st.scatterGatherEnvSecretFiles(envSecretFiles, envVals, readFile); err != nil {
if err = c.scatterGatherEnvSecretFiles(st, envSecretFiles, envVals, readFile); err != nil {
return nil, err
}
}
@ -230,7 +230,7 @@ func (st *HelmState) loadEnvValues(name string, failOnMissingEnv bool, ctxEnv *e
return newEnv, nil
}
func (st *HelmState) scatterGatherEnvSecretFiles(envSecretFiles []string, envVals map[string]interface{}, readFile func(string) ([]byte, error)) error {
func (c *StateCreator) scatterGatherEnvSecretFiles(st *HelmState, envSecretFiles []string, envVals map[string]interface{}, readFile func(string) ([]byte, error)) error {
var errs []error
inputs := envSecretFiles
@ -244,7 +244,6 @@ func (st *HelmState) scatterGatherEnvSecretFiles(envSecretFiles []string, envVal
secrets := make(chan string, inputsSize)
results := make(chan secretResult, inputsSize)
helm := st.helm
st.scatterGather(0, inputsSize,
func() {
@ -257,7 +256,7 @@ func (st *HelmState) scatterGatherEnvSecretFiles(envSecretFiles []string, envVal
for path := range secrets {
release := &ReleaseSpec{}
flags := st.appendConnectionFlags([]string{}, release)
decFile, err := helm.DecryptSecret(st.createHelmContext(release, 0), path, flags...)
decFile, err := c.getHelm(st).DecryptSecret(st.createHelmContext(release, 0), path, flags...)
if err != nil {
results <- secretResult{nil, err, path}
continue


@ -33,6 +33,8 @@ type HelmState struct {
basePath string
FilePath string
DefaultHelmBinary string `yaml:"helmBinary,omitempty"`
// DefaultValues is the default values to be overrode by environment values and command-line overrides
DefaultValues []interface{} `yaml:"values,omitempty"`
@ -63,7 +65,6 @@ type HelmState struct {
tempDir func(string, string) (string, error)
runner helmexec.Runner
helm helmexec.Interface
valsRuntime vals.Evaluator
}


@ -78,21 +78,16 @@ func (st *HelmState) iterateOnReleases(helm helmexec.Interface, concurrency int,
func(id int) {
for release := range releases {
err := do(release, id)
st.logger.Debugf("sending result for release: %s\n", release.Name)
st.logger.Debugf("release %q processed", release.Name)
results <- result{release: release, err: err}
st.logger.Debugf("sent result for release: %s\n", release.Name)
}
},
func() {
for i := range inputs {
st.logger.Debugf("receiving result %d", i)
for range inputs {
r := <-results
if r.err != nil {
errs = append(errs, fmt.Errorf("release \"%s\" failed: %v", r.release.Name, r.err))
} else {
st.logger.Debugf("received result for release \"%s\"", r.release.Name)
}
st.logger.Debugf("received result for %d", i)
}
},
)