diff --git a/integrations/inputunifi/poller/influx.go b/integrations/inputunifi/poller/influx.go
new file mode 100644
index 00000000..1d0af593
--- /dev/null
+++ b/integrations/inputunifi/poller/influx.go
@@ -0,0 +1,48 @@
+package poller
+
+import (
+    "fmt"
+
+    "github.com/davidnewhall/unifi-poller/influxunifi"
+    "github.com/davidnewhall/unifi-poller/metrics"
+    client "github.com/influxdata/influxdb1-client/v2"
+)
+
+// ReportMetrics batches all the metrics and writes them to InfluxDB.
+// This creates an InfluxDB writer, and returns an error if the write fails.
+func (u *UnifiPoller) ReportMetrics(metrics *metrics.Metrics) error {
+    // Batch (and send) all the points.
+    m := &influxunifi.Metrics{Metrics: metrics}
+    // Make a new Influx Points Batcher.
+    var err error
+    m.BatchPoints, err = client.NewBatchPoints(client.BatchPointsConfig{Database: u.Config.InfluxDB})
+    if err != nil {
+        return fmt.Errorf("influx.NewBatchPoints: %v", err)
+    }
+    for _, err := range m.ProcessPoints() {
+        u.LogError(err, "influx.ProcessPoints")
+    }
+    if err = u.Influx.Write(m.BatchPoints); err != nil {
+        return fmt.Errorf("influxdb.Write(points): %v", err)
+    }
+    u.LogInfluxReport(m)
+    return nil
+}
+
+// LogInfluxReport writes a log message after exporting to influxdb.
+func (u *UnifiPoller) LogInfluxReport(m *influxunifi.Metrics) {
+    var fields, points int
+    for _, p := range m.Points() {
+        points++
+        i, _ := p.Fields()
+        fields += len(i)
+    }
+    idsMsg := ""
+    if u.Config.CollectIDS {
+        idsMsg = fmt.Sprintf("IDS Events: %d, ", len(m.IDSList))
+    }
+    u.Logf("UniFi Measurements Recorded. Sites: %d, Clients: %d, "+
+        "Wireless APs: %d, Gateways: %d, Switches: %d, %sPoints: %d, Fields: %d",
+        len(m.Sites), len(m.Clients), len(m.UAPs),
+        len(m.UDMs)+len(m.USGs), len(m.USWs), idsMsg, points, fields)
+}
diff --git a/integrations/inputunifi/poller/prometheus.go b/integrations/inputunifi/poller/prometheus.go
new file mode 100644
index 00000000..07eeb7a7
--- /dev/null
+++ b/integrations/inputunifi/poller/prometheus.go
@@ -0,0 +1,39 @@
+package poller
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/davidnewhall/unifi-poller/metrics"
+)
+
+// ExportMetrics updates the internal metrics provided via
+// HTTP at /metrics for prometheus collection. This is run by Prometheus.
+func (u *UnifiPoller) ExportMetrics() *metrics.Metrics {
+    if u.Config.ReAuth {
+        u.LogDebugf("Re-authenticating to UniFi Controller")
+        // Some users need to re-auth every interval because the cookie times out.
+        if err := u.Unifi.Login(); err != nil {
+            u.LogError(err, "re-authenticating")
+            return nil
+        }
+    }
+    u.LastCheck = time.Now()
+    m, err := u.CollectMetrics()
+    if err != nil {
+        u.LogErrorf("collecting metrics: %v", err)
+        return nil
+    }
+    u.AugmentMetrics(m)
+
+    idsMsg := ""
+    if u.Config.CollectIDS {
+        idsMsg = fmt.Sprintf(", IDS Events: %d, ", len(m.IDSList))
+    }
+    u.Logf("UniFi Measurements Exported. Sites: %d, Clients: %d, "+
+        "Wireless APs: %d, Gateways: %d, Switches: %d%s",
+        len(m.Sites), len(m.Clients), len(m.UAPs),
+        len(m.UDMs)+len(m.USGs), len(m.USWs), idsMsg)
+
+    return m
+}
diff --git a/integrations/inputunifi/poller/start.go b/integrations/inputunifi/poller/start.go
index 38ac87cc..4ad0a380 100644
--- a/integrations/inputunifi/poller/start.go
+++ b/integrations/inputunifi/poller/start.go
@@ -11,7 +11,7 @@ import (
     "time"
 
     "github.com/davidnewhall/unifi-poller/promunifi"
-    client "github.com/influxdata/influxdb1-client/v2"
+    influx "github.com/influxdata/influxdb1-client/v2"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
     "github.com/spf13/pflag"
@@ -100,7 +100,7 @@ func (u *UnifiPoller) Run() (err error) {
     case "prometheus", "exporter":
         u.Logf("Exporting Measurements at https://%s/metrics for Prometheus", u.Config.HTTPListen)
         http.Handle("/metrics", promhttp.Handler())
-        prometheus.MustRegister(promunifi.NewUnifiCollector(promunifi.UnifiCollectorOpts{
+        prometheus.MustRegister(promunifi.NewUnifiCollector(promunifi.UnifiCollectorCnfg{
            Namespace:    "unifi",
            CollectFn:    u.ExportMetrics,
            ReportErrors: true,
@@ -121,7 +121,7 @@ func (u *UnifiPoller) Run() (err error) {
 
 // GetInfluxDB returns an InfluxDB interface.
 func (u *UnifiPoller) GetInfluxDB() (err error) {
-    u.Influx, err = client.NewHTTPClient(client.HTTPConfig{
+    u.Influx, err = influx.NewHTTPClient(influx.HTTPConfig{
        Addr:     u.Config.InfluxURL,
        Username: u.Config.InfluxUser,
        Password: u.Config.InfluxPass,
diff --git a/integrations/inputunifi/poller/unifi.go b/integrations/inputunifi/poller/unifi.go
index b9d34011..dd768022 100644
--- a/integrations/inputunifi/poller/unifi.go
+++ b/integrations/inputunifi/poller/unifi.go
@@ -6,12 +6,37 @@ import (
     "strings"
     "time"
 
-    "github.com/davidnewhall/unifi-poller/influxunifi"
     "github.com/davidnewhall/unifi-poller/metrics"
-    client "github.com/influxdata/influxdb1-client/v2"
     "golift.io/unifi"
 )
 
+// PollController runs forever, polling UniFi
+// and pushing to influx OR exporting for prometheus.
+// This is started by Run() after everything checks out.
+func (u *UnifiPoller) PollController(process func(*metrics.Metrics) error) error {
+    interval := u.Config.Interval.Round(time.Second)
+    log.Printf("[INFO] Everything checks out! Poller started in %v mode, interval: %v", u.Config.Mode, interval)
+    ticker := time.NewTicker(interval)
+    for u.LastCheck = range ticker.C {
+        var err error
+        if u.Config.ReAuth {
+            u.LogDebugf("Re-authenticating to UniFi Controller")
+            // Some users need to re-auth every interval because the cookie times out.
+            if err = u.Unifi.Login(); err != nil {
+                u.LogError(err, "re-authenticating")
+            }
+        }
+        if err == nil {
+            // Only run this if the authentication procedure didn't return error.
+            _ = u.CollectAndProcess(process)
+        }
+        if u.errorCount > 0 {
+            return fmt.Errorf("too many errors, stopping poller")
+        }
+    }
+    return nil
+}
+
 // CheckSites makes sure the list of provided sites exists on the controller.
 // This does not run in Lambda (run-once) mode.
 func (u *UnifiPoller) CheckSites() error {
@@ -45,33 +70,6 @@ FIRST:
     return nil
 }
 
-// PollController runs forever, polling UniFi
-// and pushing to influx OR exporting for prometheus.
-// This is started by Run() after everything checks out.
-func (u *UnifiPoller) PollController(process func(*metrics.Metrics) error) error {
-    interval := u.Config.Interval.Round(time.Second)
-    log.Printf("[INFO] Everything checks out! Poller started in %v mode, interval: %v", u.Config.Mode, interval)
-    ticker := time.NewTicker(interval)
-    for u.LastCheck = range ticker.C {
-        var err error
-        if u.Config.ReAuth {
-            u.LogDebugf("Re-authenticating to UniFi Controller")
-            // Some users need to re-auth every interval because the cookie times out.
-            if err = u.Unifi.Login(); err != nil {
-                u.LogError(err, "re-authenticating")
-            }
-        }
-        if err == nil {
-            // Only run this if the authentication procedure didn't return error.
-            _ = u.CollectAndProcess(process)
-        }
-        if u.errorCount > 0 {
-            return fmt.Errorf("too many errors, stopping poller")
-        }
-    }
-    return nil
-}
-
 // CollectAndProcess collects measurements and then passese them into the
 // provided method. The method is either an http exporter or an influxdb update.
 // Can be called once or in a ticker loop. This function and all the ones below
@@ -89,37 +87,6 @@ func (u *UnifiPoller) CollectAndProcess(process func(*metrics.Metrics) error) er
     return err
 }
 
-// ExportMetrics updates the internal metrics provided via
-// HTTP at /metrics for prometheus collection. This is run by Prometheus.
-func (u *UnifiPoller) ExportMetrics() *metrics.Metrics {
-    if u.Config.ReAuth {
-        u.LogDebugf("Re-authenticating to UniFi Controller")
-        // Some users need to re-auth every interval because the cookie times out.
-        if err := u.Unifi.Login(); err != nil {
-            u.LogError(err, "re-authenticating")
-            return nil
-        }
-    }
-    u.LastCheck = time.Now()
-    m, err := u.CollectMetrics()
-    if err != nil {
-        u.LogErrorf("collecting metrics: %v", err)
-        return nil
-    }
-    u.AugmentMetrics(m)
-
-    idsMsg := ""
-    if u.Config.CollectIDS {
-        idsMsg = fmt.Sprintf(", IDS Events: %d, ", len(m.IDSList))
-    }
-    u.Logf("UniFi Measurements Exported. Sites: %d, Clients: %d, "+
-        "Wireless APs: %d, Gateways: %d, Switches: %d%s",
-        len(m.Sites), len(m.Clients), len(m.UAPs),
-        len(m.UDMs)+len(m.USGs), len(m.USWs), idsMsg)
-
-    return m
-}
-
 // CollectMetrics grabs all the measurements from a UniFi controller and returns them.
 func (u *UnifiPoller) CollectMetrics() (*metrics.Metrics, error) {
     m := &metrics.Metrics{TS: u.LastCheck} // At this point, it's the Current Check.
@@ -173,45 +140,6 @@ func (u *UnifiPoller) AugmentMetrics(metrics *metrics.Metrics) {
     }
 }
 
-// ReportMetrics batches all the metrics and writes them to InfluxDB.
-// This creates an InfluxDB writer, and returns an error if the write fails.
-func (u *UnifiPoller) ReportMetrics(metrics *metrics.Metrics) error {
-    // Batch (and send) all the points.
-    m := &influxunifi.Metrics{Metrics: metrics}
-    // Make a new Influx Points Batcher.
-    var err error
-    m.BatchPoints, err = client.NewBatchPoints(client.BatchPointsConfig{Database: u.Config.InfluxDB})
-    if err != nil {
-        return fmt.Errorf("influx.NewBatchPoints: %v", err)
-    }
-    for _, err := range m.ProcessPoints() {
-        u.LogError(err, "influx.ProcessPoints")
-    }
-    if err = u.Influx.Write(m.BatchPoints); err != nil {
-        return fmt.Errorf("influxdb.Write(points): %v", err)
-    }
-    u.LogInfluxReport(m)
-    return nil
-}
-
-// LogInfluxReport writes a log message after exporting to influxdb.
-func (u *UnifiPoller) LogInfluxReport(m *influxunifi.Metrics) {
-    var fields, points int
-    for _, p := range m.Points() {
-        points++
-        i, _ := p.Fields()
-        fields += len(i)
-    }
-    idsMsg := ""
-    if u.Config.CollectIDS {
-        idsMsg = fmt.Sprintf("IDS Events: %d, ", len(m.IDSList))
-    }
-    u.Logf("UniFi Measurements Recorded. Sites: %d, Clients: %d, "+
-        "Wireless APs: %d, Gateways: %d, Switches: %d, %sPoints: %d, Fields: %d",
-        len(m.Sites), len(m.Clients), len(m.UAPs),
-        len(m.UDMs)+len(m.USGs), len(m.USWs), idsMsg, points, fields)
-}
-
 // GetFilteredSites returns a list of sites to fetch data for.
 // Omits requested but unconfigured sites. Grabs the full list from the
 // controller and returns the sites provided in the config file.
diff --git a/integrations/inputunifi/promunifi/collector.go b/integrations/inputunifi/promunifi/collector.go
index 1af860d4..2632019d 100644
--- a/integrations/inputunifi/promunifi/collector.go
+++ b/integrations/inputunifi/promunifi/collector.go
@@ -9,8 +9,8 @@ import (
     "github.com/prometheus/client_golang/prometheus"
 )
 
-// UnifiCollectorOpts defines the data needed to collect and report UniFi Metrics.
-type UnifiCollectorOpts struct {
+// UnifiCollectorCnfg defines the data needed to collect and report UniFi Metrics.
+type UnifiCollectorCnfg struct {
     // If non-empty, each of the collected metrics is prefixed by the
     // provided string and an underscore ("_").
     Namespace string
@@ -27,7 +27,7 @@ type UnifiCollectorOpts struct {
 }
 
 type unifiCollector struct {
-    opts   UnifiCollectorOpts
+    Config UnifiCollectorCnfg
     Client *client
     UAP    *uap
     USG    *usg
@@ -46,13 +46,13 @@ type metricExports struct {
 
 // NewUnifiCollector returns a prometheus collector that will export any available
 // UniFi metrics. You must provide a collection function in the opts.
-func NewUnifiCollector(opts UnifiCollectorOpts) prometheus.Collector {
+func NewUnifiCollector(opts UnifiCollectorCnfg) prometheus.Collector {
     if opts.CollectFn == nil {
         panic("nil collector function")
     }
 
     return &unifiCollector{
-        opts:   opts,
+        Config: opts,
         Client: descClient(opts.Namespace),
         UAP:    descUAP(opts.Namespace),
         USG:    descUSG(opts.Namespace),
@@ -84,7 +84,7 @@ func (u *unifiCollector) Describe(ch chan<- *prometheus.Desc) {
     describe(u.USW)
     describe(u.UDM)
     describe(u.Site)
-    if u.opts.CollectIDS {
+    if u.Config.CollectIDS {
         describe(u.IDS)
     }
 }
@@ -92,7 +92,7 @@ func (u *unifiCollector) Describe(ch chan<- *prometheus.Desc) {
 // Collect satisifes the prometheus Collector. This runs the input method to get
 // the current metrics (from another package) then exports them for prometheus.
 func (u *unifiCollector) Collect(ch chan<- prometheus.Metric) {
-    m := u.opts.CollectFn()
+    m := u.Config.CollectFn()
     if m == nil {
         return
     }
@@ -103,7 +103,7 @@ func (u *unifiCollector) Collect(ch chan<- prometheus.Metric) {
     for _, asset := range m.Sites {
         u.export(ch, u.exportSite(asset), m.TS)
     }
-    if u.opts.CollectIDS {
+    if u.Config.CollectIDS {
         for _, asset := range m.IDSList {
             u.export(ch, u.exportIDS(asset), m.TS)
         }
@@ -131,7 +131,7 @@ func (u *unifiCollector) export(ch chan<- prometheus.Metric, exports []*metricEx
     for _, e := range exports {
         v, ok := e.Value.(float64)
         if !ok {
-            if u.opts.ReportErrors {
+            if u.Config.ReportErrors {
                 ch <- prometheus.NewInvalidMetric(e.Desc, fmt.Errorf("not a number"))
             }
             return
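
A minimal, hypothetical usage sketch of the renamed promunifi.UnifiCollectorCnfg follows. The struct fields, NewUnifiCollector, prometheus.MustRegister, and promhttp.Handler are taken from the start.go and collector.go hunks above; the standalone main package, the stub CollectFn, and the ":9130" listen address are illustrative assumptions, not part of this change.

package main

import (
    "log"
    "net/http"

    "github.com/davidnewhall/unifi-poller/metrics"
    "github.com/davidnewhall/unifi-poller/promunifi"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // Register the UniFi collector. CollectFn must be non-nil or
    // NewUnifiCollector panics (see the check in collector.go above).
    // Returning nil from CollectFn makes Collect() skip the scrape,
    // mirroring how ExportMetrics returns nil on errors.
    prometheus.MustRegister(promunifi.NewUnifiCollector(promunifi.UnifiCollectorCnfg{
        Namespace:    "unifi",
        CollectFn:    func() *metrics.Metrics { return nil }, // stub for illustration only
        ReportErrors: true,
    }))
    http.Handle("/metrics", promhttp.Handler())
    // ":9130" is an arbitrary example port, not one used by the poller.
    log.Fatal(http.ListenAndServe(":9130", nil))
}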