feat: `helmfiles: <ordered glob patterns of helmfiles>` configuration (#266)

Resolves #247
KUOKA Yusuke 2018-08-31 12:03:18 +09:00 committed by GitHub
parent b70956b5d8
commit 7c793fdb88
3 changed files with 113 additions and 16 deletions

README.md

@@ -371,6 +371,76 @@ proxy:
scheme: {{ env "SCHEME" | default "https" }}
```
## Separating helmfile.yaml into multiple independent files
Once your `helmfile.yaml` grows to contain too many releases,
split it into multiple YAML files.
The recommended granularity of helmfile.yaml files is "per microservice" or "per team".
There are two ways to organize your files:
- Single directory
- Glob patterns
### Single directory
`helmfile -f path/to/directory` loads and runs all the YAML files under the specified directory, treating each file as an independent helmfile.yaml.
The default helmfile directory is `helmfile.d`; that is,
when helmfile is unable to locate `helmfile.yaml`, it falls back to `helmfile.d/*.yaml`.
All the YAML files under the specified directory are processed in alphabetical order. For example, you can use a `<two digit number>-<microservice>.yaml` naming convention to control the sync order.
- `helmfile.d`/
- `00-database.yaml`
- `00-backend.yaml`
- `01-frontend.yaml`
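Given a layout like the above, either of the following runs every file under `helmfile.d` as its own state file; a sketch of the invocation, assuming no top-level `helmfile.yaml` exists:
```bash
# with no helmfile.yaml present, helmfile falls back to helmfile.d/*.yaml
# and processes the files in alphabetical order
helmfile sync

# pointing at the directory explicitly behaves the same way
helmfile -f helmfile.d sync
```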
### Glob patterns
In case you want more control over how multiple `helmfile.yaml` files are organized, use the `helmfiles:` configuration key in your `helmfile.yaml`.
Suppose you have multiple microservices organized in a Git repository that looks like:
- `myteam/` (sometimes this maps to a Kubernetes namespace, e.g. `kube-system` for the `clusterops` team)
- `apps/`
- `filebeat/`
- `helmfile.yaml` (no `charts/` exists, because it depends on the stable/filebeat chart hosted on the official helm charts repository)
- `README.md` (each app managed by my team has a dedicated README maintained by the owners of the app)
- `metricbeat/`
- `helmfile.yaml`
- `README.md`
- `elastalert-operator/`
- `helmfile.yaml`
- `README.md`
- `charts/`
- `elastalert-operator/`
- `<the content of the local helm chart>`
The benefit of this structure is that you can run `git diff` to find out which directory (= microservice) a commit touches.
It allows your CI system to run a workflow only for the changed microservices.
A downside is that there is no obvious way to sync all the microservices at once. That is, you have to run:
```bash
for d in apps/*; do helmfile -f $d diff; if [ $? -eq 2 ]; then helmfile -f $d sync; fi; done
```
At this point, you might start writing a `Makefile` under `myteam/` so that `make sync-all` does the job.
That works, but you can rely on helmfile's built-in support instead.
Put a `myteam/helmfile.yaml` that looks like:
```yaml
helmfiles:
- apps/*/helmfile.yaml
```
This way you can get rid of the `Makefile` and the bash snippet.
Just run `helmfile sync` inside `myteam/`, and you are done.
All the matched files are sorted alphabetically per group (= array item inside `helmfiles:`), so you have granular control over ordering, too.
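For illustration, a hypothetical `myteam/helmfile.yaml` with two glob groups processes every match of the first pattern before moving on to the second, sorting matches alphabetically within each group:
```yaml
helmfiles:
# every match of this pattern is processed first, in alphabetical order
- infra/*/helmfile.yaml
# then every match of this one
- apps/*/helmfile.yaml
```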
## Using env files
helmfile itself doesn't have the ability to load env files, but you can write a small bash wrapper to achieve the goal:
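A minimal sketch, assuming the variables live in a `.env` file of `KEY=VALUE` lines and that your templates read them via the `env` template function:
```bash
#!/usr/bin/env bash
# export every variable defined in .env, then run helmfile
# with whatever arguments were passed to this wrapper
set -a
. ./.env
set +a
exec helmfile "$@"
```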

main.go

@@ -106,7 +106,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
args := args.GetArgs(c.String("args"), state)
if len(args) > 0 {
helm.SetExtraArgs(args...)
@@ -139,7 +139,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
args := args.GetArgs(c.String("args"), state)
if len(args) > 0 {
helm.SetExtraArgs(args...)
@@ -183,7 +183,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return executeDiffCommand(c, state, helm, c.Bool("detailed-exitcode"))
})
},
@@ -208,7 +208,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
args := args.GetArgs(c.String("args"), state)
if len(args) > 0 {
helm.SetExtraArgs(args...)
@@ -244,7 +244,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return executeSyncCommand(c, state, helm)
})
},
@@ -273,7 +273,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
errs := executeDiffCommand(c, state, helm, true)
// sync only when there are changes
@@ -322,7 +322,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
workers := c.Int("concurrency")
args := args.GetArgs(c.String("args"), state)
@@ -352,7 +352,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
purge := c.Bool("purge")
args := args.GetArgs(c.String("args"), state)
@@ -388,7 +388,7 @@ func main() {
},
},
Action: func(c *cli.Context) error {
return eachDesiredStateDo(c, func(state *state.HelmState, helm helmexec.Interface) []error {
return findAndIterateOverDesiredStatesUsingFlags(c, func(state *state.HelmState, helm helmexec.Interface) []error {
cleanup := c.Bool("cleanup")
timeout := c.Int("timeout")
@@ -457,14 +457,23 @@ func executeDiffCommand(c *cli.Context, state *state.HelmState, helm helmexec.In
return state.DiffReleases(helm, values, workers, detailedExitCode)
}
func eachDesiredStateDo(c *cli.Context, converge func(*state.HelmState, helmexec.Interface) []error) error {
fileOrDirPath := c.GlobalString("file")
desiredStateFiles, err := findDesiredStateFiles(fileOrDirPath)
func findAndIterateOverDesiredStatesUsingFlags(c *cli.Context, converge func(*state.HelmState, helmexec.Interface) []error) error {
fileOrDir := c.GlobalString("file")
kubeContext := c.GlobalString("kube-context")
namespace := c.GlobalString("namespace")
selectors := c.GlobalStringSlice("selector")
logger := c.App.Metadata["logger"].(*zap.SugaredLogger)
return findAndIterateOverDesiredStates(fileOrDir, converge, kubeContext, namespace, selectors, logger)
}
func findAndIterateOverDesiredStates(fileOrDir string, converge func(*state.HelmState, helmexec.Interface) []error, kubeContext, namespace string, selectors []string, logger *zap.SugaredLogger) error {
desiredStateFiles, err := findDesiredStateFiles(fileOrDir)
if err != nil {
return err
}
allSelectorNotMatched := true
for _, f := range desiredStateFiles {
logger.Debugf("Processing %s", f)
yamlBuf, err := tmpl.NewFileRenderer(ioutil.ReadFile, "").RenderTemplateFileToBuffer(f)
if err != nil {
return err
@@ -472,14 +481,31 @@ func eachDesiredStateDo(c *cli.Context, converge func(*state.HelmState, helmexec
state, helm, noReleases, err := loadDesiredStateFromFile(
yamlBuf.Bytes(),
f,
c.GlobalString("kube-context"),
c.GlobalString("namespace"),
c.GlobalStringSlice("selector"),
c.App.Metadata["logger"].(*zap.SugaredLogger),
kubeContext,
namespace,
selectors,
logger,
)
if err != nil {
return err
}
if len(state.Helmfiles) > 0 {
for _, globPattern := range state.Helmfiles {
matches, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("failed processing %s: %v", globPattern, err)
}
sort.Strings(matches)
for _, m := range matches {
if err := findAndIterateOverDesiredStates(m, converge, kubeContext, namespace, selectors, logger); err != nil {
return fmt.Errorf("failed processing %s: %v", globPattern, err)
}
}
}
return nil
}
allSelectorNotMatched = allSelectorNotMatched && noReleases
if noReleases {
continue

state/state.go

@@ -24,6 +24,7 @@ type HelmState struct {
BaseChartPath string
FilePath string
HelmDefaults HelmSpec `yaml:"helmDefaults"`
Helmfiles []string `yaml:"helmfiles"`
Context string `yaml:"context"`
DeprecatedReleases []ReleaseSpec `yaml:"charts"`
Namespace string `yaml:"namespace"`