chore: update dependencies and add advanced kubedog configuration example

Signed-off-by: yxxhero <aiopsclub@163.com>
This commit is contained in:
yxxhero 2026-03-03 18:24:41 +08:00
parent cb901de432
commit 7dd3ca59e3
4 changed files with 223 additions and 30 deletions

View File

@@ -0,0 +1,141 @@
# Advanced Kubedog Configuration Examples
#
# Each document below (separated by `---`) is an independent helmfile.yaml
# illustrating one kubedog-tracking scenario.

# Example 1: Basic kubedog tracking with custom QPS/Burst
releases:
  - name: simple-app
    namespace: default
    chart: ./charts/simple-app
    trackMode: kubedog    # track the rollout with kubedog instead of `helm --wait`
    trackTimeout: 300     # seconds to wait before tracking is considered failed
    trackLogs: true       # stream container logs while tracking
    kubedogQPS: 50        # client-side Kubernetes API rate limit (queries/sec)
    kubedogBurst: 100     # burst allowance on top of the QPS limit
---
# Example 2: With resource filtering
releases:
  - name: filtered-app
    namespace: production
    chart: ./charts/complex-app
    trackMode: kubedog
    trackTimeout: 600
    trackLogs: true
    trackKinds:           # only these resource kinds are tracked
      - Deployment
      - StatefulSet
    skipKinds:            # these resource kinds are ignored entirely
      - ConfigMap
      - Secret
---
# Example 3: With specific resource tracking
releases:
  - name: selective-tracking
    namespace: default
    chart: ./charts/microservices
    trackMode: kubedog
    trackResources:       # track only the explicitly listed resources
      - kind: Deployment
        name: api-server
        namespace: default
      - kind: StatefulSet
        name: database
        namespace: default
---
# Example 4: Production-grade configuration
releases:
  - name: production-app
    namespace: production
    chart: ./charts/production-app
    trackMode: kubedog
    trackTimeout: 900
    trackLogs: true
    kubedogQPS: 30        # conservative limits to avoid pressuring a busy API server
    kubedogBurst: 60
    trackKinds:
      - Deployment
      - StatefulSet
      - DaemonSet
      - Job
    trackResources:
      - kind: Deployment
        name: frontend
        namespace: production
      - kind: Deployment
        name: backend
        namespace: production
      - kind: StatefulSet
        name: redis
        namespace: production
---
# Example 5: With Helm values containing annotations (future feature)
# Note: Annotation support is proposed but not yet implemented
releases:
  - name: annotated-app
    namespace: default
    chart: ./charts/annotated-app
    trackMode: kubedog
    values:
      - values.yaml:
      # When annotation support is implemented, these would work:
      # metadata:
      #   annotations:
      #     helmfile.dev/track-termination-mode: "NonBlocking"
      #     helmfile.dev/fail-mode: "HopeUntilEndOfDeployProcess"
      #     helmfile.dev/failures-allowed-per-replica: "2"
      #     helmfile.dev/log-regex: "^(ERROR|WARN)"
      #     helmfile.dev/skip-logs-for-containers: "sidecar,init"
      #     helmfile.dev/show-service-messages: "true"
---
# Example 6: Multi-environment configuration
environments:
  production:
    values:
      - kubedogQPS: 30
      - kubedogBurst: 60
      - trackTimeout: 900
  staging:
    values:
      - kubedogQPS: 100
      - kubedogBurst: 200
      - trackTimeout: 300
releases:
  - name: multi-env-app
    namespace: {{ .Environment.Name }}
    chart: ./charts/app
    trackMode: kubedog
    trackLogs: true
    # Templated values are resolved from the selected environment above.
    kubedogQPS: {{ .Values.kubedogQPS }}
    kubedogBurst: {{ .Values.kubedogBurst }}
    trackTimeout: {{ .Values.trackTimeout }}
---
# Example 7: With needs and tracking (tracking happens after dependencies)
releases:
  - name: database
    namespace: default
    chart: ./charts/postgresql
    trackMode: kubedog
    trackTimeout: 600
  - name: backend
    namespace: default
    chart: ./charts/backend
    needs:                # installed (and tracked) only after `database` succeeds
      - database
    trackMode: kubedog
    trackTimeout: 300
    trackLogs: true
  - name: frontend
    namespace: default
    chart: ./charts/frontend
    needs:                # installed (and tracked) only after `backend` succeeds
      - backend
    trackMode: kubedog
    trackTimeout: 300
    trackLogs: true

2
go.mod
View File

@@ -25,7 +25,7 @@ require (
github.com/tatsushid/go-prettytable v0.0.0-20141013043238-ed2d14c29939
github.com/tj/assert v0.0.3
github.com/variantdev/dag v1.1.0
github.com/werf/kubedog v0.13.0
github.com/werf/kubedog-for-werf-helm v0.0.0-20241217155728-9d45c48b82b6
github.com/zclconf/go-cty v1.18.0
github.com/zclconf/go-cty-yaml v1.2.0
go.szostok.io/version v1.2.0

4
go.sum
View File

@@ -775,8 +775,8 @@ github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ=
github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo=
github.com/variantdev/dag v1.1.0 h1:xodYlSng33KWGvIGMpKUyLcIZRXKiNUx612mZJqYrDg=
github.com/variantdev/dag v1.1.0/go.mod h1:pH1TQsNSLj2uxMo9NNl9zdGy01Wtn+/2MT96BrKmVyE=
github.com/werf/kubedog v0.13.0 h1:ys+GyZbIMqm0r2po0HClbONcEnS5cWSFR2BayIfBqsY=
github.com/werf/kubedog v0.13.0/go.mod h1:Y6pesrIN5uhFKqmHnHSoeW4jmVyZlWPFWv5SjB0rUPg=
github.com/werf/kubedog-for-werf-helm v0.0.0-20241217155728-9d45c48b82b6 h1:lpgQPTCp+wNJfTqJWtR6A5gRA4e4m/eRJFV7V18XCoA=
github.com/werf/kubedog-for-werf-helm v0.0.0-20241217155728-9d45c48b82b6/go.mod h1:PA9xGVKX9Il6sCgvPrcB3/FahRme3bXRz4BuylvAssc=
github.com/werf/logboek v0.6.1 h1:oEe6FkmlKg0z0n80oZjLplj6sXcBeLleCkjfOOZEL2g=
github.com/werf/logboek v0.6.1/go.mod h1:Gez5J4bxekyr6MxTmIJyId1F61rpO+0/V4vjCIEIZmk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=

View File

@@ -9,10 +9,16 @@ import (
"sync"
"time"
"github.com/werf/kubedog/pkg/tracker"
"github.com/werf/kubedog/pkg/trackers/rollout/multitrack"
"github.com/werf/kubedog-for-werf-helm/pkg/tracker"
"github.com/werf/kubedog-for-werf-helm/pkg/trackers/rollout/multitrack"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
"github.com/helmfile/helmfile/pkg/resource"
@@ -25,17 +31,28 @@ type cacheKey struct {
burst int
}
type clientCacheEntry struct {
clientSet kubernetes.Interface
dynamicClient dynamic.Interface
restConfig *rest.Config
discovery discovery.CachedDiscoveryInterface
mapper meta.RESTMapper
}
var (
kubeInitMu sync.Mutex
clientCache = make(map[cacheKey]kubernetes.Interface)
clientCache = make(map[cacheKey]clientCacheEntry)
)
type Tracker struct {
logger *zap.SugaredLogger
clientSet kubernetes.Interface
trackOptions *TrackOptions
filter *resource.ResourceFilter
namespace string
logger *zap.SugaredLogger
clientSet kubernetes.Interface
dynamicClient dynamic.Interface
discovery discovery.CachedDiscoveryInterface
mapper meta.RESTMapper
trackOptions *TrackOptions
filter *resource.ResourceFilter
namespace string
}
type TrackerConfig struct {
@@ -81,9 +98,9 @@ func NewTracker(config *TrackerConfig) (*Tracker, error) {
return nil, fmt.Errorf("invalid kubedog burst %v: must be >= 1", burst)
}
clientSet, err := getOrCreateClient(config.KubeContext, kubeconfig, qps, burst)
cacheEntry, err := getOrCreateClients(config.KubeContext, kubeconfig, qps, burst)
if err != nil {
return nil, fmt.Errorf("failed to initialize kubernetes client: %w", err)
return nil, fmt.Errorf("failed to initialize kubernetes clients: %w", err)
}
var filter *resource.ResourceFilter
@@ -92,15 +109,18 @@ func NewTracker(config *TrackerConfig) (*Tracker, error) {
}
return &Tracker{
logger: logger,
clientSet: clientSet,
trackOptions: options,
filter: filter,
namespace: config.Namespace,
logger: logger,
clientSet: cacheEntry.clientSet,
dynamicClient: cacheEntry.dynamicClient,
discovery: cacheEntry.discovery,
mapper: cacheEntry.mapper,
trackOptions: options,
filter: filter,
namespace: config.Namespace,
}, nil
}
func getOrCreateClient(kubeContext, kubeconfig string, qps float32, burst int) (kubernetes.Interface, error) {
func getOrCreateClients(kubeContext, kubeconfig string, qps float32, burst int) (clientCacheEntry, error) {
key := cacheKey{
kubeContext: kubeContext,
kubeconfig: kubeconfig,
@@ -109,9 +129,9 @@ func getOrCreateClient(kubeContext, kubeconfig string, qps float32, burst int) (
}
kubeInitMu.Lock()
if client, ok := clientCache[key]; ok {
if cache, ok := clientCache[key]; ok {
kubeInitMu.Unlock()
return client, nil
return cache, nil
}
kubeInitMu.Unlock()
@@ -128,27 +148,43 @@ func getOrCreateClient(kubeContext, kubeconfig string, qps float32, burst int) (
cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
restConfig, err := cc.ClientConfig()
if err != nil {
return nil, fmt.Errorf("failed to load kubeconfig: %w", err)
return clientCacheEntry{}, fmt.Errorf("failed to load kubeconfig: %w", err)
}
restConfig.QPS = qps
restConfig.Burst = burst
client, err := kubernetes.NewForConfig(restConfig)
clientSet, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("failed to create kubernetes client: %w", err)
return clientCacheEntry{}, fmt.Errorf("failed to create kubernetes client: %w", err)
}
dynamicClient, err := dynamic.NewForConfig(restConfig)
if err != nil {
return clientCacheEntry{}, fmt.Errorf("failed to create dynamic client: %w", err)
}
discoveryClient := memory.NewMemCacheClient(clientSet.Discovery())
mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
cache := clientCacheEntry{
clientSet: clientSet,
dynamicClient: dynamicClient,
restConfig: restConfig,
discovery: discoveryClient,
mapper: mapper,
}
kubeInitMu.Lock()
defer kubeInitMu.Unlock()
if existingClient, ok := clientCache[key]; ok {
return existingClient, nil
if existingCache, ok := clientCache[key]; ok {
return existingCache, nil
}
clientCache[key] = client
clientCache[key] = cache
return client, nil
return cache, nil
}
func (t *Tracker) TrackResources(ctx context.Context, resources []*resource.Resource) error {
@@ -198,16 +234,29 @@ func (t *Tracker) TrackResources(ctx context.Context, resources []*resource.Reso
Namespace: namespace,
SkipLogs: !t.trackOptions.Logs,
})
case "canary":
specs.Canaries = append(specs.Canaries, multitrack.MultitrackSpec{
ResourceName: res.Name,
Namespace: namespace,
SkipLogs: !t.trackOptions.Logs,
})
default:
t.logger.Debugf("Skipping unsupported kind %s for resource %s/%s", res.Kind, namespace, res.Name)
}
}
if len(specs.Deployments)+len(specs.StatefulSets)+len(specs.DaemonSets)+len(specs.Jobs) == 0 {
t.logger.Info("No trackable resources found (only Deployment, StatefulSet, DaemonSet, and Job are supported)")
totalResources := len(specs.Deployments) + len(specs.StatefulSets) +
len(specs.DaemonSets) + len(specs.Jobs) + len(specs.Canaries)
if totalResources == 0 {
t.logger.Info("No trackable resources found (only Deployment, StatefulSet, DaemonSet, Job, and Canary are supported)")
return nil
}
t.logger.Infof("Tracking breakdown: Deployments=%d, StatefulSets=%d, DaemonSets=%d, Jobs=%d, Canaries=%d",
len(specs.Deployments), len(specs.StatefulSets), len(specs.DaemonSets),
len(specs.Jobs), len(specs.Canaries))
opts := multitrack.MultitrackOptions{
Options: tracker.Options{
ParentContext: ctx,
@@ -215,6 +264,9 @@ func (t *Tracker) TrackResources(ctx context.Context, resources []*resource.Reso
LogsFromTime: time.Now().Add(-t.trackOptions.LogsSince),
},
StatusProgressPeriod: 5 * time.Second,
DynamicClient: t.dynamicClient,
DiscoveryClient: t.discovery,
Mapper: t.mapper,
}
err := multitrack.Multitrack(t.clientSet, specs, opts)