shave off some nanoseconds

davidnewhall2 2019-11-25 03:14:14 -08:00
parent e88f889baa
commit 184a20c019
7 changed files with 88 additions and 60 deletions

View File

@@ -82,6 +82,13 @@ func descClient(ns string) *uclient {
 	}
 }
 
+func (u *unifiCollector) exportClients(clients []*unifi.Client) (e []*metricExports) {
+	for _, c := range clients {
+		e = append(e, u.exportClient(c)...)
+	}
+	return
+}
+
 // CollectClient exports Clients' Data
 func (u *unifiCollector) exportClient(c *unifi.Client) []*metricExports {
 	labels := []string{c.ID, c.Mac, c.UserID, c.SiteID, c.SiteName,

View File

@@ -3,6 +3,7 @@ package promunifi
 import (
 	"fmt"
 	"reflect"
+	"sync"
 	"time"
 
 	"github.com/davidnewhall/unifi-poller/metrics"
@@ -51,6 +52,9 @@ type Report struct {
 	Descs   int
 	Metrics *metrics.Metrics
 	Elapsed time.Duration
+	Start   time.Time
+	ch      chan []*metricExports
+	wait    sync.WaitGroup
 }
 
 // NewUnifiCollector returns a prometheus collector that will export any available
@@ -97,83 +101,65 @@ func (u *unifiCollector) Describe(ch chan<- *prometheus.Desc) {
 // Collect satisifes the prometheus Collector. This runs the input method to get
 // the current metrics (from another package) then exports them for prometheus.
 func (u *unifiCollector) Collect(ch chan<- prometheus.Metric) {
-	start := time.Now()
-	unifiMetrics, err := u.Config.CollectFn()
-	if err != nil {
+	var err error
+
+	r := &Report{Start: time.Now(), ch: make(chan []*metricExports)}
+	defer func() {
+		r.wait.Wait()
+		close(r.ch)
+	}()
+
+	if r.Metrics, err = u.Config.CollectFn(); err != nil {
 		ch <- prometheus.NewInvalidMetric(
 			prometheus.NewInvalidDesc(fmt.Errorf("metric fetch failed")), err)
 		return
 	}
 
-	descs := make(map[*prometheus.Desc]bool) // used as a counter
-	r := &Report{Metrics: unifiMetrics}
-
-	if u.Config.LoggingFn != nil {
-		defer func() {
-			r.Elapsed = time.Since(start)
-			r.Descs = len(descs)
-			u.Config.LoggingFn(r)
-		}()
-	}
-
-	export := func(metrics []*metricExports) {
-		count, errors := u.export(ch, metrics)
-		r.Total += count
-		r.Errors += errors
-		for _, d := range metrics {
-			descs[d.Desc] = true
-		}
-	}
-
-	for _, asset := range r.Metrics.Clients {
-		export(u.exportClient(asset))
-	}
-	for _, asset := range r.Metrics.Sites {
-		export(u.exportSite(asset))
-	}
+	go u.exportMetrics(ch, r)
+
+	r.wait.Add(2)
+	go func() { r.ch <- u.exportClients(r.Metrics.Clients) }()
+	go func() { r.ch <- u.exportSites(r.Metrics.Sites) }()
 
 	if r.Metrics.Devices == nil {
 		return
 	}
 
-	for _, asset := range r.Metrics.Devices.UAPs {
-		export(u.exportUAP(asset))
-	}
-	for _, asset := range r.Metrics.Devices.USGs {
-		export(u.exportUSG(asset))
-	}
-	for _, asset := range r.Metrics.Devices.USWs {
-		export(u.exportUSW(asset))
-	}
-	for _, asset := range r.Metrics.Devices.UDMs {
-		export(u.exportUDM(asset))
-	}
+	r.wait.Add(4)
+	go func() { r.ch <- u.exportUAPs(r.Metrics.Devices.UAPs) }()
+	go func() { r.ch <- u.exportUSGs(r.Metrics.Devices.USGs) }()
+	go func() { r.ch <- u.exportUSWs(r.Metrics.Devices.USWs) }()
+	go func() { r.ch <- u.exportUDMs(r.Metrics.Devices.UDMs) }()
 }
 
-func (u *unifiCollector) export(ch chan<- prometheus.Metric, exports []*metricExports) (count, errors int) {
-	for _, e := range exports {
-		var val float64
-
-		switch v := e.Value.(type) {
-		case float64:
-			val = v
-		case int64:
-			val = float64(v)
-		case unifi.FlexInt:
-			val = v.Val
-		default:
-			errors++
-			if u.Config.ReportErrors {
-				ch <- prometheus.NewInvalidMetric(e.Desc, fmt.Errorf("not a number: %v", e.Value))
-			}
-			continue
-		}
-
-		count++
-		ch <- prometheus.MustNewConstMetric(e.Desc, e.ValueType, val, e.Labels...)
-	}
-
-	return
-}
+// Call this once (at least as-is). It sets all the counters and runs the logging function.
+func (u *unifiCollector) exportMetrics(ch chan<- prometheus.Metric, r *Report) {
+	descs := make(map[*prometheus.Desc]bool) // used as a counter
+
+	for newMetrics := range r.ch {
+		for _, m := range newMetrics {
+			r.Total++
+			descs[m.Desc] = true
+
+			switch v := m.Value.(type) {
+			case float64:
+				ch <- prometheus.MustNewConstMetric(m.Desc, m.ValueType, v, m.Labels...)
+			case int64:
+				ch <- prometheus.MustNewConstMetric(m.Desc, m.ValueType, float64(v), m.Labels...)
+			case unifi.FlexInt:
+				ch <- prometheus.MustNewConstMetric(m.Desc, m.ValueType, v.Val, m.Labels...)
+			default:
+				r.Errors++
+				if u.Config.ReportErrors {
+					ch <- prometheus.NewInvalidMetric(m.Desc, fmt.Errorf("not a number: %v", m.Value))
+				}
+			}
+		}
+
+		r.wait.Done()
+	}
+
+	if u.Config.LoggingFn == nil {
+		return
+	}
+
+	r.Descs, r.Elapsed = len(descs), time.Since(r.Start)
+	u.Config.LoggingFn(r)
+}
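
The rewritten Collect/exportMetrics pair is a small fan-out/fan-in: Collect registers each exporter on the wait group, fires it off as a goroutine that sends one batch of metricExports on r.ch, and the deferred wait.Wait()/close(r.ch) ends the consumer's range loop only after every batch has been drained. A minimal standalone sketch of that pattern, with illustrative names (item, done) that are not part of this commit:

package main

import (
	"fmt"
	"sync"
)

type item struct{ name string }

func main() {
	ch := make(chan []item)     // fan-in channel, playing the role of Report.ch
	done := make(chan struct{}) // lets main wait for the consumer; not in the commit

	var wait sync.WaitGroup // counts outstanding batches, like Report.wait

	// Consumer: stands in for exportMetrics. It drains every batch and marks it
	// done so the producer side knows when it is safe to close ch.
	go func() {
		defer close(done)

		total := 0
		for batch := range ch {
			total += len(batch)
			wait.Done()
		}

		fmt.Println("exported", total, "items")
	}()

	// Producers: stand in for the go func() { r.ch <- u.exportClients(...) }()
	// calls in Collect. Add() runs before the goroutines start, so Wait() below
	// cannot return early.
	wait.Add(2)
	go func() { ch <- []item{{"client-a"}, {"client-b"}} }()
	go func() { ch <- []item{{"site-default"}} }()

	// Same job as the deferred block in Collect: wait until every batch has been
	// consumed, then close the channel so the consumer's range loop ends.
	wait.Wait()
	close(ch)
	<-done
}

Calling Add() before starting the goroutines is what keeps the deferred Wait()/close() in Collect from racing the exporters.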

View File

@@ -67,6 +67,13 @@ func descSite(ns string) *site {
 	}
 }
 
+func (u *unifiCollector) exportSites(sites unifi.Sites) (e []*metricExports) {
+	for _, s := range sites {
+		e = append(e, u.exportSite(s)...)
+	}
+	return
+}
+
 // exportSite exports Network Site Data
 func (u *unifiCollector) exportSite(s *unifi.Site) []*metricExports {
 	labels := []string{s.Name, s.Desc, s.SiteName}

View File

@@ -102,6 +102,13 @@ func descUAP(ns string) *uap {
 	}
 }
 
+func (u *unifiCollector) exportUAPs(uaps []*unifi.UAP) (e []*metricExports) {
+	for _, a := range uaps {
+		e = append(e, u.exportUAP(a)...)
+	}
+	return
+}
+
 // exportUAP exports Access Point Data
 func (u *unifiCollector) exportUAP(a *unifi.UAP) []*metricExports {
 	labels := []string{a.SiteName, a.Mac, a.Model, a.Name, a.Serial, a.SiteID,

View File

@@ -11,6 +11,13 @@ func descUDM(ns string) *udm {
 	return &udm{}
 }
 
+func (u *unifiCollector) exportUDMs(udms []*unifi.UDM) (e []*metricExports) {
+	for _, d := range udms {
+		e = append(e, u.exportUDM(d)...)
+	}
+	return
+}
+
 // exportUDM exports UniFi Dream Machine (and Pro) Data
 func (u *unifiCollector) exportUDM(d *unifi.UDM) []*metricExports {
 	return nil

View File

@@ -109,6 +109,13 @@ func descUSG(ns string) *usg {
 	}
 }
 
+func (u *unifiCollector) exportUSGs(usgs []*unifi.USG) (e []*metricExports) {
+	for _, sg := range usgs {
+		e = append(e, u.exportUSG(sg)...)
+	}
+	return
+}
+
 // exportUSG Exports Security Gateway Data
 // uplink and port tables structs are ignored. that data should be in other exported fields.
 func (u *unifiCollector) exportUSG(s *unifi.USG) []*metricExports {

View File

@@ -136,6 +136,13 @@ func descUSW(ns string) *usw {
 	}
 }
 
+func (u *unifiCollector) exportUSWs(usws []*unifi.USW) (e []*metricExports) {
+	for _, sw := range usws {
+		e = append(e, u.exportUSW(sw)...)
+	}
+	return
+}
+
 // exportUSW exports Network Switch Data
 func (u *unifiCollector) exportUSW(s *unifi.USW) []*metricExports {
 	labels := []string{s.Type, s.Version, s.DeviceID, s.IP,
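
All seven exportXXXs helpers added in this commit share the same flatten-a-slice shape. Purely as an illustration (the commit predates Go generics and repeats the loop once per device type, which is the idiomatic choice for Go 1.13-era code), the shared shape looks like this; metricExport is a stand-in type so the sketch compiles on its own:

package main

import "fmt"

// metricExport is a stand-in for the repo's *metricExports values; it exists
// only to keep this sketch self-contained.
type metricExport struct{ name string }

// flatten captures the shape shared by exportClients, exportSites, exportUAPs,
// exportUSGs, exportUSWs and exportUDMs: walk a slice and append each item's
// exports onto one flat slice.
func flatten[T any](items []T, export func(T) []metricExport) (e []metricExport) {
	for _, item := range items {
		e = append(e, export(item)...)
	}
	return
}

func main() {
	clients := []string{"laptop", "phone"}
	out := flatten(clients, func(c string) []metricExport {
		return []metricExport{{name: c + "_bytes"}, {name: c + "_uptime"}}
	})
	fmt.Println(len(out), "metric exports") // prints: 4 metric exports
}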