Create DeleteWait and DeleteTimeout parameters for Destroy (#1177)
* Create DeleteWait and DeleteTimeout parameters
  Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>

* Create tests for deleteWait and deleteTimeout
  Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>

* build(deps): bump github.com/aws/aws-sdk-go from 1.48.6 to 1.48.7 (#1176)
  Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.48.6 to 1.48.7.
  - [Release notes](https://github.com/aws/aws-sdk-go/releases)
  - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.48.6...v1.48.7)
  ---
  updated-dependencies:
  - dependency-name: github.com/aws/aws-sdk-go
    dependency-type: direct:production
    update-type: version-update:semver-patch
  ...
  Signed-off-by: dependabot[bot] <support@github.com>
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Update temp_test.go with DeleteWait and DeleteTimeout
  Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>

* Create deleteWait function in state.go
  Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>

* Fix comments from review
  Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>

---------

Signed-off-by: Virginia Tavares <briosovirginia@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Virginia Tavares <virginia.tavares@ericsson.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent 4659b2fdd8
commit b10692dc9b
@@ -37,6 +37,8 @@ func NewDeleteCmd(globalCfg *config.GlobalImpl) *cobra.Command {
 	f.IntVar(&deleteOptions.Concurrency, "concurrency", 0, "maximum number of concurrent helm processes to run, 0 is unlimited")
 	f.BoolVar(&deleteOptions.Purge, "purge", false, "purge releases i.e. free release names and histories")
 	f.BoolVar(&deleteOptions.SkipCharts, "skip-charts", false, "don't prepare charts when deleting releases")
+	f.BoolVar(&deleteOptions.DeleteWait, "deleteWait", false, `override helmDefaults.wait setting "helm uninstall --wait"`)
+	f.IntVar(&deleteOptions.DeleteTimeout, "deleteTimeout", 300, `time in seconds to wait for helm uninstall, default: 300`)
 
 	return cmd
 }
@@ -35,6 +35,8 @@ func NewDestroyCmd(globalCfg *config.GlobalImpl) *cobra.Command {
 	f.StringVar(&destroyOptions.Cascade, "cascade", "", "pass cascade to helm exec, default: background")
 	f.IntVar(&destroyOptions.Concurrency, "concurrency", 0, "maximum number of concurrent helm processes to run, 0 is unlimited")
 	f.BoolVar(&destroyOptions.SkipCharts, "skip-charts", false, "don't prepare charts when destroying releases")
+	f.BoolVar(&destroyOptions.DeleteWait, "deleteWait", false, `override helmDefaults.wait setting "helm uninstall --wait"`)
+	f.IntVar(&destroyOptions.DeleteTimeout, "deleteTimeout", 300, `time in seconds to wait for helm uninstall, default: 300`)
 
 	return cmd
 }
@@ -221,6 +221,8 @@ helmDefaults:
   cascade: "background"
   # insecureSkipTLSVerify is true if the TLS verification should be skipped when fetching remote chart
   insecureSkipTLSVerify: false
+  # --wait flag for destroy/delete, if set to true, will wait until all resources are deleted before mark delete command as successful
+  deleteWait: false
+  # Timeout is the time in seconds to wait for helmfile destroy/delete (default 300)
+  deleteTimeout: 300
 
 # these labels will be applied to all releases in a Helmfile. Useful in templating if you have a helmfile per environment or customer and don't want to copy the same label to each release
 commonLabels:
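Not part of the diff itself, but as a quick illustration of the new configuration surface: the sketch below unmarshals a helmDefaults snippet carrying the two new keys into a minimal struct that mirrors only the HelmSpec fields added in this commit. The trimmed-down structs and the gopkg.in/yaml.v3 dependency are assumptions made to keep the example self-contained.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3" // assumed dependency for this standalone sketch
)

// helmDefaults mirrors only the two fields added in this change;
// the real HelmSpec in helmfile has many more fields.
type helmDefaults struct {
	DeleteWait    bool `yaml:"deleteWait"`
	DeleteTimeout int  `yaml:"deleteTimeout"`
}

type config struct {
	HelmDefaults helmDefaults `yaml:"helmDefaults"`
}

func main() {
	doc := []byte(`
helmDefaults:
  deleteWait: true
  deleteTimeout: 600
`)
	var c config
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	// Prints: wait=true timeout=600s
	fmt.Printf("wait=%v timeout=%ds\n", c.HelmDefaults.DeleteWait, c.HelmDefaults.DeleteTimeout)
}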
@@ -477,6 +477,8 @@ func (a *App) Delete(c DeleteConfigProvider) error {
 		SkipRepos: c.SkipDeps(),
 		SkipDeps: c.SkipDeps(),
 		Concurrency: c.Concurrency(),
+		DeleteWait: c.DeleteWait(),
+		DeleteTimeout: c.DeleteTimeout(),
 	}, func() {
 		ok, errs = a.delete(run, c.Purge(), c)
 	})
@@ -498,6 +500,8 @@ func (a *App) Destroy(c DestroyConfigProvider) error {
 		SkipRepos: c.SkipDeps(),
 		SkipDeps: c.SkipDeps(),
 		Concurrency: c.Concurrency(),
+		DeleteWait: c.DeleteWait(),
+		DeleteTimeout: c.DeleteTimeout(),
 	}, func() {
 		ok, errs = a.delete(run, true, c)
 	})
@@ -159,6 +159,8 @@ type DeleteConfigProvider interface {
 	Purge() bool
 	SkipDeps() bool
 	SkipCharts() bool
+	DeleteWait() bool
+	DeleteTimeout() int
 
 	interactive
 	loggingConfig
@@ -171,6 +173,8 @@ type DestroyConfigProvider interface {
 
 	SkipDeps() bool
 	SkipCharts() bool
+	DeleteWait() bool
+	DeleteTimeout() int
 
 	interactive
 	loggingConfig
@@ -25,6 +25,8 @@ func TestDestroy_2(t *testing.T) {
 		upgraded []exectest.Release
 		deleted []exectest.Release
 		log string
+		deleteWait bool
+		deleteTimeout int
 	}
 
 	check := func(t *testing.T, tc testcase) {
@@ -77,6 +79,8 @@ func TestDestroy_2(t *testing.T) {
 			concurrency: tc.concurrency,
 			logger: logger,
 			includeTransitiveNeeds: false,
+			deleteWait: tc.deleteWait,
+			deleteTimeout: tc.deleteTimeout,
 		})
 
 		switch {
@@ -455,6 +459,9 @@ database 4 Fri Nov 1 08:40:07 2019 DEPLOYED mysql-3.1.0 3.1.0 def
 anotherbackend 4 Fri Nov 1 08:40:07 2019 DEPLOYED anotherbackend-3.1.0 3.1.0 default
 `,
 			},
+			// Enable wait and set timeout for destroy
+			deleteWait: true,
+			deleteTimeout: 300,
 			// Disable concurrency to avoid in-deterministic result
 			concurrency: 1,
 			upgraded: []exectest.Release{},
@@ -41,6 +41,8 @@ type destroyConfig struct {
 	logger *zap.SugaredLogger
 	includeTransitiveNeeds bool
 	skipCharts bool
+	deleteWait bool
+	deleteTimeout int
 }
 
 func (d destroyConfig) Args() string {
@@ -75,6 +77,14 @@ func (d destroyConfig) IncludeTransitiveNeeds() bool {
 	return d.includeTransitiveNeeds
 }
 
+func (d destroyConfig) DeleteWait() bool {
+	return d.deleteWait
+}
+
+func (d destroyConfig) DeleteTimeout() int {
+	return d.deleteTimeout
+}
+
 func TestDestroy(t *testing.T) {
 	type testcase struct {
 		ns string
@@ -87,6 +97,8 @@ func TestDestroy(t *testing.T) {
 		upgraded []exectest.Release
 		deleted []exectest.Release
 		log string
+		deleteWait bool
+		deleteTimeout int
 	}
 
 	check := func(t *testing.T, tc testcase) {
@@ -300,6 +312,9 @@ anotherbackend 4 Fri Nov 1 08:40:07 2019 DEPLOYED anotherbackend-3.1.0
 			},
 			// Disable concurrency to avoid in-deterministic result
 			concurrency: 1,
+			// Enable wait and set timeout for destroy
+			deleteWait: true,
+			deleteTimeout: 300,
 			upgraded: []exectest.Release{},
 			deleted: []exectest.Release{
 				{Name: "frontend-v3", Flags: []string{}},
@@ -748,6 +763,9 @@ changing working directory back to "/path/to"
 			},
 			// Disable concurrency to avoid in-deterministic result
 			concurrency: 1,
+			// Enable wait and set timeout for destroy
+			deleteWait: true,
+			deleteTimeout: 300,
 			upgraded: []exectest.Release{},
 			deleted: []exectest.Release{
 				{Name: "frontend-v1", Flags: []string{}},
@@ -11,6 +11,10 @@ type DeleteOptions struct {
 	SkipCharts bool
 	// Cascade '--cascade' to helmv3 delete, available values: background, foreground, or orphan, default: background
 	Cascade string
+	// Wait '--wait' if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout
+	DeleteWait bool
+	// Timeout '--timeout', to wait for helm delete operation (default 5m0s)
+	DeleteTimeout int
 }
 
 // NewDeleteOptions creates a new Apply
@@ -51,3 +55,13 @@ func (c *DeleteImpl) SkipCharts() bool {
 func (c *DeleteImpl) Cascade() string {
 	return c.DeleteOptions.Cascade
 }
+
+// DeleteWait returns the wait flag
+func (c *DeleteImpl) DeleteWait() bool {
+	return c.DeleteOptions.DeleteWait
+}
+
+// DeleteTimeout returns the timeout flag
+func (c *DeleteImpl) DeleteTimeout() int {
+	return c.DeleteOptions.DeleteTimeout
+}
@@ -8,6 +8,10 @@ type DestroyOptions struct {
 	SkipCharts bool
 	// Cascade '--cascade' to helmv3 delete, available values: background, foreground, or orphan, default: background
 	Cascade string
+	// Wait '--wait' if set, will wait until all the resources are destroyed before returning. It will wait for as long as --timeout
+	DeleteWait bool
+	// Timeout '--timeout', to wait for helm operation (default 5m0s)
+	DeleteTimeout int
 }
 
 // NewDestroyOptions creates a new Apply
@@ -43,3 +47,13 @@ func (c *DestroyImpl) SkipCharts() bool {
 func (c *DestroyImpl) Cascade() string {
 	return c.DestroyOptions.Cascade
 }
+
+// DeleteWait returns the wait flag
+func (c *DestroyImpl) DeleteWait() bool {
+	return c.DestroyOptions.DeleteWait
+}
+
+// DeleteTimeout returns the timeout flag
+func (c *DestroyImpl) DeleteTimeout() int {
+	return c.DestroyOptions.DeleteTimeout
+}
@@ -192,6 +192,10 @@ type HelmSpec struct {
 	DisableOpenAPIValidation *bool `yaml:"disableOpenAPIValidation,omitempty"`
 	// InsecureSkipTLSVerify is true if the TLS verification should be skipped when fetching remote chart
 	InsecureSkipTLSVerify bool `yaml:"insecureSkipTLSVerify,omitempty"`
+	// Wait, if set to true, will wait until all resources are deleted before mark delete command as successful
+	DeleteWait bool `yaml:"deleteWait"`
+	// Timeout is the time in seconds to wait for helmfile delete command (default 300)
+	DeleteTimeout int `yaml:"deleteTimeout"`
 }
 
 // RepositorySpec that defines values for a helm repo
@@ -370,6 +374,11 @@ type ReleaseSpec struct {
 
 	// SuppressDiff skip the helm diff output. Useful for charts which produces large not helpful diff.
 	SuppressDiff *bool `yaml:"suppressDiff,omitempty"`
+
+	// --wait flag for destroy/delete, if set to true, will wait until all resources are deleted before mark delete command as successful
+	DeleteWait *bool `yaml:"deleteWait,omitempty"`
+	// Timeout is the time in seconds to wait for helmfile delete command (default 300)
+	DeleteTimeout *int `yaml:"deleteTimeout,omitempty"`
 }
 
 func (r *Inherits) UnmarshalYAML(unmarshal func(any) error) error {
@@ -765,6 +774,22 @@ func ReleaseToID(r *ReleaseSpec) string {
 	return id
 }
 
+func (st *HelmState) appendDeleteWaitFlags(args []string, release *ReleaseSpec) []string {
+	if release.DeleteWait != nil && *release.DeleteWait || release.DeleteWait == nil && st.HelmDefaults.DeleteWait {
+		args = append(args, "--wait")
+		timeout := st.HelmDefaults.DeleteTimeout
+		if release.DeleteTimeout != nil {
+			timeout = *release.DeleteTimeout
+		}
+		if timeout != 0 {
+			duration := strconv.Itoa(timeout)
+			duration += "s"
+			args = append(args, "--timeout", duration)
+		}
+	}
+	return args
+}
+
 // DeleteReleasesForSync deletes releases that are marked for deletion
 func (st *HelmState) DeleteReleasesForSync(affectedReleases *AffectedReleases, helm helmexec.Interface, workerLimit int, cascade string) []error {
 	errs := []error{}
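To make the behaviour of appendDeleteWaitFlags above easier to see in isolation, here is a standalone sketch of the same precedence rules: a release-level deleteWait/deleteTimeout (pointer fields, so "unset" stays distinguishable from false/0) overrides helmDefaults, --wait is only emitted when the effective wait is true, and a timeout of 0 adds no --timeout. The helmDefaults and release types below are simplified stand-ins, not the real HelmSpec/ReleaseSpec.

package main

import (
	"fmt"
	"strconv"
)

// Simplified stand-ins for the helmfile types touched by this change.
type helmDefaults struct {
	DeleteWait    bool
	DeleteTimeout int
}

type release struct {
	DeleteWait    *bool
	DeleteTimeout *int
}

// deleteWaitFlags mirrors the logic of appendDeleteWaitFlags:
// release settings win over defaults, and a timeout of 0 adds no --timeout.
func deleteWaitFlags(defaults helmDefaults, r release) []string {
	var args []string
	wait := defaults.DeleteWait
	if r.DeleteWait != nil {
		wait = *r.DeleteWait
	}
	if !wait {
		return args
	}
	args = append(args, "--wait")
	timeout := defaults.DeleteTimeout
	if r.DeleteTimeout != nil {
		timeout = *r.DeleteTimeout
	}
	if timeout != 0 {
		args = append(args, "--timeout", strconv.Itoa(timeout)+"s")
	}
	return args
}

func main() {
	w := true
	t := 800
	fmt.Println(deleteWaitFlags(helmDefaults{DeleteWait: true, DeleteTimeout: 300}, release{})) // [--wait --timeout 300s]
	fmt.Println(deleteWaitFlags(helmDefaults{}, release{DeleteWait: &w, DeleteTimeout: &t}))    // [--wait --timeout 800s]
	fmt.Println(deleteWaitFlags(helmDefaults{DeleteWait: true}, release{}))                     // [--wait] (timeout 0 adds no --timeout)
}

This lines up with the expectations in TestHelmState_Delete further down, where deleteWait: true alone yields Flags: ["--wait"] and adding deleteTimeout: 800 yields ["--wait", "--timeout", "800s"].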
@@ -800,6 +825,7 @@ func (st *HelmState) DeleteReleasesForSync(affectedReleases *AffectedReleases, h
 			if release.Namespace != "" {
 				args = append(args, "--namespace", release.Namespace)
 			}
+			args = st.appendDeleteWaitFlags(args, release)
 			args = st.appendConnectionFlags(args, release)
 			deletionFlags := st.appendCascadeFlags(args, helm, release, cascade)
 
@@ -1047,6 +1073,9 @@ type ChartPrepareOptions struct {
 	Concurrency int
 	KubeVersion string
 	Set []string
+	// Delete wait
+	DeleteWait bool
+	DeleteTimeout int
 }
 
 type chartPrepareResult struct {
@@ -2063,10 +2092,10 @@ func (st *HelmState) DeleteReleases(affectedReleases *AffectedReleases, helm hel
 			flags := make([]string, 0)
 			flags = st.appendConnectionFlags(flags, &release)
 			flags = st.appendCascadeFlags(flags, helm, &release, cascade)
+			flags = st.appendDeleteWaitFlags(flags, &release)
 			if release.Namespace != "" {
 				flags = append(flags, "--namespace", release.Namespace)
 			}
 
 			context := st.createHelmContext(&release, workerIndex)
 
 			start := time.Now()
@@ -2590,7 +2590,28 @@ func TestHelmState_Delete(t *testing.T) {
 		namespace string
 		kubeContext string
 		defKubeContext string
+		deleteWait bool
+		deleteTimeout int
 	}{
+		{
+			name: "delete wait enabled",
+			deleteWait: true,
+			wantErr: false,
+			desired: boolValue(true),
+			installed: true,
+			purge: false,
+			deleted: []exectest.Release{{Name: "releaseA", Flags: []string{"--wait"}}},
+		},
+		{
+			name: "delete wait with deleteTimeout",
+			deleteWait: true,
+			deleteTimeout: 800,
+			wantErr: false,
+			desired: boolValue(true),
+			installed: true,
+			purge: false,
+			deleted: []exectest.Release{{Name: "releaseA", Flags: []string{"--wait", "--timeout", "800s"}}},
+		},
 		{
 			name: "desired and installed (purge=false)",
 			wantErr: false,
@@ -2722,6 +2743,8 @@ func TestHelmState_Delete(t *testing.T) {
 			ReleaseSetSpec: ReleaseSetSpec{
 				HelmDefaults: HelmSpec{
 					KubeContext: tt.defKubeContext,
+					DeleteWait: tt.deleteWait,
+					DeleteTimeout: tt.deleteTimeout,
 				},
 				Releases: releases,
 			},
@@ -38,39 +38,39 @@ func TestGenerateID(t *testing.T) {
 	run(testcase{
 		subject: "baseline",
 		release: ReleaseSpec{Name: "foo", Chart: "incubator/raw"},
-		want: "foo-values-c7464bdc5",
+		want: "foo-values-b5df4fc58",
 	})
 
 	run(testcase{
 		subject: "different bytes content",
 		release: ReleaseSpec{Name: "foo", Chart: "incubator/raw"},
 		data: []byte(`{"k":"v"}`),
-		want: "foo-values-79f8658596",
+		want: "foo-values-5bd95d98d5",
 	})
 
 	run(testcase{
 		subject: "different map content",
 		release: ReleaseSpec{Name: "foo", Chart: "incubator/raw"},
 		data: map[string]any{"k": "v"},
-		want: "foo-values-7996cc88d6",
+		want: "foo-values-5cb8d75d9f",
 	})
 
 	run(testcase{
 		subject: "different chart",
 		release: ReleaseSpec{Name: "foo", Chart: "stable/envoy"},
-		want: "foo-values-7cdb6bd8b6",
+		want: "foo-values-5f6b44cff5",
 	})
 
 	run(testcase{
 		subject: "different name",
 		release: ReleaseSpec{Name: "bar", Chart: "incubator/raw"},
-		want: "bar-values-59cd6576c4",
+		want: "bar-values-546889667f",
 	})
 
 	run(testcase{
 		subject: "specific ns",
 		release: ReleaseSpec{Name: "foo", Chart: "incubator/raw", Namespace: "myns"},
-		want: "myns-foo-values-5d5d46c98d",
+		want: "myns-foo-values-78f4c8794f",
 	})
 
 	for id, n := range ids {
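The changed want values in TestGenerateID appear to be a side effect of the new ReleaseSpec fields: the generated ID embeds a digest of the release spec, so adding fields shifts the suffix even when they are left unset. The snippet below is only a toy demonstration of that effect; it does not reproduce helmfile's actual hashing scheme, and the struct names are made up for the example.

package main

import (
	"crypto/sha256"
	"fmt"
)

// Two toy specs: the second carries the fields added by this change.
type specV1 struct {
	Name  string
	Chart string
}

type specV2 struct {
	Name          string
	Chart         string
	DeleteWait    *bool
	DeleteTimeout *int
}

// hash digests the printed representation of a struct; a different set of
// fields yields a different printed form and therefore a different digest.
func hash(v any) string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%+v", v)))
	return fmt.Sprintf("%x", sum[:4])
}

func main() {
	fmt.Println(hash(specV1{Name: "foo", Chart: "incubator/raw"}))
	fmt.Println(hash(specV2{Name: "foo", Chart: "incubator/raw"})) // differs even though the new fields are unset
}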