add integration test guard;

datadog integration test works

influx package tests

update unifi version

golangci-lint and address *all* issues.

all tests pass

bump unifi version
This commit is contained in:
Cody Lee 2023-07-20 10:12:44 -05:00
parent 8cf461b0f5
commit ffb8579369
No known key found for this signature in database
57 changed files with 2241 additions and 178 deletions

View File

@ -48,12 +48,3 @@ jobs:
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.53
# Optional: working directory, useful for monorepos
# working-directory: somedir
# Optional: golangci-lint command line arguments.
# args: --issues-exit-code=0
args: --timeout=3m
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true

27
.golangci.yaml Normal file
View File

@ -0,0 +1,27 @@
run:
timeout: 3m
linters:
enable:
- wsl
- nlreturn
- tagalign
- revive
- testpackage
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
output:
sort-results: true
issues:
# disable the default limit so we see everything
max-same-issues: 0
max-issues-per-linter: 0
# default enable fix where the linter supports
fix: true

View File

@ -74,3 +74,12 @@ build: clean
clean:
git clean -xdf || true
(docker images -f "dangling=true" -q | xargs docker rmi) || true
lint:
golangci-lint run --fix
test:
go test -timeout=30s ./...
integration-test:
go test -timeout=30m -args=integration ./...

11
go.mod
View File

@ -10,6 +10,7 @@ require (
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/common v0.44.0
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c
github.com/stretchr/testify v1.8.4
golang.org/x/crypto v0.11.0
golang.org/x/net v0.12.0
golang.org/x/term v0.10.0
@ -20,9 +21,12 @@ require (
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/brianvoe/gofakeit/v6 v6.23.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/tools v0.1.12 // indirect
)
@ -35,8 +39,11 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/unpoller/unifi v0.3.5
github.com/unpoller/unifi v0.3.15
golang.org/x/sys v0.10.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gopkg.in/yaml.v3 v3.0.1
)
// for local iterative development only
// replace github.com/unpoller/unifi => ../unifi

9
go.sum
View File

@ -6,6 +6,8 @@ github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2y
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/brianvoe/gofakeit/v6 v6.23.0 h1:pgVhyWpYq4e0GEVCh2gdZnS/nBX+8SnyTBliHg5xjks=
github.com/brianvoe/gofakeit/v6 v6.23.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@ -69,14 +71,15 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c h1:zqmyTlQyufRC65JnImJ6H1Sf7BDj8bG31EV919NVEQc=
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/unpoller/unifi v0.3.5 h1:C1JSL05TnZoH64wwxRbjv+5iP+GnpQUXz0bZiSJgtt0=
github.com/unpoller/unifi v0.3.5/go.mod h1:Vea8cfAjs6vR8UbZm1AtiRk6AKxp01SxGdbdBTGkM1Y=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/unpoller/unifi v0.3.15 h1:UsN1VhU1Ufy4VLGxQgE5Aq2GUhXgqzPLy2lgvKAEeyU=
github.com/unpoller/unifi v0.3.15/go.mod h1:aubNKie2j5AcqW3G9m/th4G8SSULVgeDEr6gj4SVJzo=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=

View File

@ -57,6 +57,7 @@ func (u *DatadogUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dup
"app_proto": event.AppProto,
"action": event.InnerAlertAction,
}
r.addCount(alarmT)
tagMap = cleanTags(tagMap)

View File

@ -37,10 +37,12 @@ func (u *DatadogUnifi) batchClient(r report, s *unifi.Client) { // nolint: funle
"bssid": s.Bssid,
"ip": s.IP,
}
powerSaveEnabled := 0.0
if s.PowersaveEnabled.Val {
powerSaveEnabled = 1.0
}
data := map[string]float64{
"anomalies": s.Anomalies.Val,
"channel": s.Channel.Val,
@ -84,6 +86,7 @@ func (u *DatadogUnifi) batchClientDPI(r report, v any, appTotal, catTotal totals
s, ok := v.(*unifi.DPITable)
if !ok {
u.LogErrorf("invalid type given to batchClientDPI: %T", v)
return
}

View File

@ -108,7 +108,7 @@ type Datadog struct {
// DatadogUnifi is returned by New() after you provide a Config.
type DatadogUnifi struct {
Collector poller.Collect
datadog statsd.ClientInterface
Statsd statsd.ClientInterface
LastCheck time.Time
*Datadog
}
@ -188,19 +188,21 @@ func (u *DatadogUnifi) setConfigDefaults() {
if u.AggregationFlushInterval != nil {
u.options = append(u.options, statsd.WithAggregationInterval(*u.AggregationFlushInterval))
}
}
func (u *DatadogUnifi) Enabled() bool {
if u == nil {
return false
}
if u.Config == nil {
return false
}
if u.Enable == nil {
return false
}
return *u.Enable
}
@ -208,15 +210,20 @@ func (u *DatadogUnifi) DebugOutput() (bool, error) {
if u == nil {
return true, nil
}
if !u.Enabled() {
return true, nil
}
u.setConfigDefaults()
var err error
u.datadog, err = statsd.New(u.Address, u.options...)
u.Statsd, err = statsd.New(u.Address, u.options...)
if err != nil {
return false, fmt.Errorf("Error configuration Datadog agent reporting: %+v", err)
}
return true, nil
}
@ -225,15 +232,19 @@ func (u *DatadogUnifi) Run(c poller.Collect) error {
u.Collector = c
if !u.Enabled() {
u.LogDebugf("DataDog config missing (or disabled), DataDog output disabled!")
return nil
}
u.Logf("Datadog is enabled")
u.setConfigDefaults()
var err error
u.datadog, err = statsd.New(u.Address, u.options...)
u.Statsd, err = statsd.New(u.Address, u.options...)
if err != nil {
u.LogErrorf("Error configuration Datadog agent reporting: %+v", err)
return err
}
@ -250,28 +261,37 @@ func (u *DatadogUnifi) PollController() {
u.Logf("Everything checks out! Poller started, interval=%+v", interval)
for u.LastCheck = range ticker.C {
u.Collect(interval)
}
}
func (u *DatadogUnifi) Collect(interval time.Duration) {
metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
if err != nil {
u.LogErrorf("metric fetch for Datadog failed: %v", err)
continue
return
}
events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
if err != nil {
u.LogErrorf("event fetch for Datadog failed", err)
continue
return
}
report, err := u.ReportMetrics(metrics, events)
if err != nil {
// Is the agent down?
u.LogErrorf("unable to report metrics and events", err)
_ = report.reportCount("unifi.collect.errors", 1, []string{})
continue
return
}
_ = report.reportCount("unifi.collect.success", 1, []string{})
u.LogDatadogReport(report)
}
}
// ReportMetrics batches all device and client data into datadog data points.
@ -284,13 +304,16 @@ func (u *DatadogUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Repo
Start: time.Now(),
Counts: &Counts{Val: make(map[item]int)},
Collector: u.Collector,
client: u.datadog,
client: u.Statsd,
}
// batch all the points.
u.loopPoints(r)
r.End = time.Now()
r.Elapsed = r.End.Sub(r.Start)
_ = r.reportTiming("unifi.collector_timing", r.Elapsed, []string{})
return r, nil
}
@ -368,6 +391,7 @@ func (u *DatadogUnifi) switchExport(r report, v any) { //nolint:cyclop
// LogDatadogReport writes a log message after exporting to Datadog.
func (u *DatadogUnifi) LogDatadogReport(r *Report) {
m := r.Metrics
u.Logf("UniFi Metrics Recorded num_sites=%d num_sites_dpi=%d num_clients=%d num_clients_dpi=%d num_rogue_ap=%d num_devices=%d errors=%v elapsec=%v",
len(m.Sites),
len(m.SitesDPI),
@ -378,7 +402,9 @@ func (u *DatadogUnifi) LogDatadogReport(r *Report) {
r.Errors,
r.Elapsed,
)
metricName := metricNamespace("collector")
_ = r.reportCount(metricName("num_sites"), int64(len(m.Sites)), u.Tags)
_ = r.reportCount(metricName("num_sites_dpi"), int64(len(m.SitesDPI)), u.Tags)
_ = r.reportCount(metricName("num_clients"), int64(len(m.Clients)), u.Tags)

View File

@ -0,0 +1,300 @@
package datadogunifi_test
import (
"os"
"sync"
"testing"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/unpoller/unpoller/pkg/datadogunifi"
"github.com/unpoller/unpoller/pkg/testutil"
"golift.io/cnfg"
"gopkg.in/yaml.v3"
)
// mockValue captures a single recorded metric sample: the raw value that was
// reported and the tags that accompanied it.
type mockValue struct {
	value any      // last reported value; concrete type depends on the metric call (float64, int64, string, or time.Duration)
	tags  []string // tags supplied with the report
}
// mockStatsd allows us to mock statsd.ClientInterface and collect data to ensure we're writing out
// metrics as expected with the correct types.
//
// Each map-backed family keeps only the most recent sample per metric name,
// which is sufficient for the integration test's key-set comparisons; events
// and service checks are appended in arrival order. The embedded RWMutex
// guards every field, mirroring how a real statsd client may be called
// concurrently.
type mockStatsd struct {
	sync.RWMutex
	gauges        map[string]mockValue
	counts        map[string]mockValue
	histograms    map[string]mockValue
	distributions map[string]mockValue
	sets          map[string]mockValue
	timings       map[string]mockValue
	events        []string // event titles, in order received
	checks        []string // service-check names, in order received
}
// Gauge measures the value of a metric at a particular time.
// The mock records the most recent gauge value and tags under name.
func (m *mockStatsd) Gauge(name string, value float64, tags []string, _ float64) error {
	m.Lock()
	m.gauges[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Count tracks how many times something happened per second.
// The mock records the most recent count value and tags under name.
func (m *mockStatsd) Count(name string, value int64, tags []string, _ float64) error {
	m.Lock()
	m.counts[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Histogram tracks the statistical distribution of a set of values on each host.
// The mock records the most recent histogram value and tags under name.
func (m *mockStatsd) Histogram(name string, value float64, tags []string, _ float64) error {
	m.Lock()
	m.histograms[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Distribution tracks the statistical distribution of a set of values across your infrastructure.
// The mock records the most recent distribution value and tags under name.
func (m *mockStatsd) Distribution(name string, value float64, tags []string, _ float64) error {
	m.Lock()
	m.distributions[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Decr is just Count of -1.
// The mock records -1 (not a running total) under name.
func (m *mockStatsd) Decr(name string, tags []string, _ float64) error {
	m.Lock()
	m.counts[name] = mockValue{value: -1, tags: tags}
	m.Unlock()

	return nil
}
// Incr is just Count of 1.
// The mock records 1 (not a running total) under name.
func (m *mockStatsd) Incr(name string, tags []string, _ float64) error {
	m.Lock()
	m.counts[name] = mockValue{value: 1, tags: tags}
	m.Unlock()

	return nil
}
// Set counts the number of unique elements in a group.
// The mock records the most recent set member and tags under name.
func (m *mockStatsd) Set(name string, value string, tags []string, _ float64) error {
	m.Lock()
	m.sets[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Timing sends timing information, it is an alias for TimeInMilliseconds.
// The mock records the most recent duration and tags under name.
func (m *mockStatsd) Timing(name string, value time.Duration, tags []string, _ float64) error {
	m.Lock()
	m.timings[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
// The mock records the most recent value and tags under name, sharing the
// timings map with Timing.
func (m *mockStatsd) TimeInMilliseconds(name string, value float64, tags []string, _ float64) error {
	m.Lock()
	m.timings[name] = mockValue{value: value, tags: tags}
	m.Unlock()

	return nil
}
// Event sends the provided Event.
// The mock appends only the event's title, in arrival order.
func (m *mockStatsd) Event(e *statsd.Event) error {
	m.Lock()
	m.events = append(m.events, e.Title)
	m.Unlock()

	return nil
}
// SimpleEvent sends an event with the provided title and text.
// The mock appends only the title, in arrival order; the text is discarded.
func (m *mockStatsd) SimpleEvent(title, _ string) error {
	m.Lock()
	m.events = append(m.events, title)
	m.Unlock()

	return nil
}
// ServiceCheck sends the provided ServiceCheck.
// The mock appends only the check's name, in arrival order.
func (m *mockStatsd) ServiceCheck(sc *statsd.ServiceCheck) error {
	m.Lock()
	m.checks = append(m.checks, sc.Name)
	m.Unlock()

	return nil
}
// SimpleServiceCheck sends a serviceCheck with the provided name and status.
// The mock appends only the name, in arrival order; the status is discarded.
func (m *mockStatsd) SimpleServiceCheck(name string, _ statsd.ServiceCheckStatus) error {
	m.Lock()
	m.checks = append(m.checks, name)
	m.Unlock()

	return nil
}
// Close the client connection.
// The mock holds no connection, so this is a no-op that always succeeds.
func (m *mockStatsd) Close() error {
	return nil
}
// Flush forces a flush of all the queued dogstatsd payloads.
// The mock records synchronously, so there is nothing to flush; always succeeds.
func (m *mockStatsd) Flush() error {
	return nil
}
// SetWriteTimeout allows the user to set a custom write timeout.
// The mock performs no network writes, so the timeout is ignored; always succeeds.
func (m *mockStatsd) SetWriteTimeout(_ time.Duration) error {
	return nil
}
// testExpectations mirrors integration_test_expectations.yaml: for each metric
// family it lists the exact metric names the integration test expects the
// collector to have reported.
type testExpectations struct {
	Gauges        []string `yaml:"gauges"`
	Counts        []string `yaml:"counts"`
	Timings       []string `yaml:"timings"`
	Sets          []string `yaml:"sets"`
	Histograms    []string `yaml:"histograms"`
	Distributions []string `yaml:"distributions"`
	ServiceChecks []string `yaml:"service_checks"`
}
func TestDataDogUnifiIntegration(t *testing.T) {
// load test expectations file
yamlFile, err := os.ReadFile("integration_test_expectations.yaml")
require.NoError(t, err)
var testExpectationsData testExpectations
err = yaml.Unmarshal(yamlFile, &testExpectationsData)
require.NoError(t, err)
testRig := testutil.NewTestSetup(t)
defer testRig.Close()
mockCapture := &mockStatsd{
gauges: make(map[string]mockValue, 0),
counts: make(map[string]mockValue, 0),
histograms: make(map[string]mockValue, 0),
distributions: make(map[string]mockValue, 0),
sets: make(map[string]mockValue, 0),
timings: make(map[string]mockValue, 0),
events: make([]string, 0),
checks: make([]string, 0),
}
u := datadogunifi.DatadogUnifi{
Datadog: &datadogunifi.Datadog{
Config: &datadogunifi.Config{
Enable: testutil.PBool(true),
Interval: cnfg.Duration{Duration: time.Hour},
},
},
Statsd: mockCapture,
}
testRig.Initialize()
u.Collector = testRig.Collector
u.Collect(time.Minute)
mockCapture.RLock()
defer mockCapture.RUnlock()
// gauges
assert.Equal(t, len(testExpectationsData.Gauges), len(mockCapture.gauges))
expectedKeys := testutil.NewSetFromSlice[string](testExpectationsData.Gauges)
foundKeys := testutil.NewSetFromMap[string](mockCapture.gauges)
additions, deletions := expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// counts
assert.Len(t, mockCapture.counts, 12)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.Counts)
foundKeys = testutil.NewSetFromMap[string](mockCapture.counts)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// timings
assert.Len(t, mockCapture.timings, 2)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.Timings)
foundKeys = testutil.NewSetFromMap[string](mockCapture.timings)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// histograms
assert.Len(t, mockCapture.histograms, 0)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.Histograms)
foundKeys = testutil.NewSetFromMap[string](mockCapture.histograms)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// distributions
assert.Len(t, mockCapture.distributions, 0)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.Distributions)
foundKeys = testutil.NewSetFromMap[string](mockCapture.distributions)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// sets
assert.Len(t, mockCapture.sets, 0)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.Sets)
foundKeys = testutil.NewSetFromMap[string](mockCapture.sets)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
// events
// at least one event from an alarm should happen
assert.GreaterOrEqual(t, len(mockCapture.events), 1)
// service checks
assert.Len(t, mockCapture.checks, 0)
expectedKeys = testutil.NewSetFromSlice[string](testExpectationsData.ServiceChecks)
foundKeys = testutil.NewSetFromSlice[string](mockCapture.checks)
additions, deletions = expectedKeys.Difference(foundKeys)
assert.Len(t, additions, 0)
assert.Len(t, deletions, 0)
}

View File

@ -0,0 +1,319 @@
gauges:
- unifi.client_dpi.rx_bytes
- unifi.client_dpi.rx_packets
- unifi.client_dpi.tx_bytes
- unifi.client_dpi.tx_packets
- unifi.clients.anomalies
- unifi.clients.bytes_r
- unifi.clients.ccq
- unifi.clients.channel
- unifi.clients.noise
- unifi.clients.powersave_enabled
- unifi.clients.roam_count
- unifi.clients.rssi
- unifi.clients.rx_bytes
- unifi.clients.rx_bytes_r
- unifi.clients.rx_packets
- unifi.clients.rx_rate
- unifi.clients.satisfaction
- unifi.clients.signal
- unifi.clients.tx_bytes
- unifi.clients.tx_bytes_r
- unifi.clients.tx_packets
- unifi.clients.tx_power
- unifi.clients.tx_rate
- unifi.clients.tx_retries
- unifi.clients.uptime
- unifi.clients.wifi_tx_attempts
- unifi.clients.wired_rx_bytes
- unifi.clients.wired_rx_bytes-r
- unifi.clients.wired_rx_packets
- unifi.clients.wired_tx_bytes
- unifi.clients.wired_tx_bytes-r
- unifi.clients.wired_tx_packets
- unifi.subsystems.drops
- unifi.subsystems.gw_cpu
- unifi.subsystems.gw_mem
- unifi.subsystems.gw_uptime
- unifi.subsystems.latency
- unifi.subsystems.num_adopted
- unifi.subsystems.num_ap
- unifi.subsystems.num_disabled
- unifi.subsystems.num_disconnected
- unifi.subsystems.num_guest
- unifi.subsystems.num_gw
- unifi.subsystems.num_iot
- unifi.subsystems.num_new_alarms
- unifi.subsystems.num_pending
- unifi.subsystems.num_sta
- unifi.subsystems.num_sw
- unifi.subsystems.num_user
- unifi.subsystems.remote_user_num_active
- unifi.subsystems.remote_user_num_inactive
- unifi.subsystems.remote_user_rx_bytes
- unifi.subsystems.remote_user_rx_packets
- unifi.subsystems.remote_user_tx_bytes
- unifi.subsystems.remote_user_tx_packets
- unifi.subsystems.rx_bytes_r
- unifi.subsystems.speedtest_lastrun
- unifi.subsystems.speedtest_ping
- unifi.subsystems.tx_bytes_r
- unifi.subsystems.uptime
- unifi.subsystems.xput_down
- unifi.subsystems.xput_up
- unifi.uap.adopted
- unifi.uap.bytes
- unifi.uap.cpu
- unifi.uap.guest_num_sta
- unifi.uap.last_seen
- unifi.uap.loadavg_1
- unifi.uap.loadavg_15
- unifi.uap.loadavg_5
- unifi.uap.locating
- unifi.uap.mem
- unifi.uap.mem_buffer
- unifi.uap.mem_total
- unifi.uap.mem_used
- unifi.uap.memory
- unifi.uap.network
- unifi.uap.num_sta
- unifi.uap.probe
- unifi.uap.rx_bytes
- unifi.uap.stat_guest-rx_bytes
- unifi.uap.stat_guest-rx_crypts
- unifi.uap.stat_guest-rx_dropped
- unifi.uap.stat_guest-rx_errors
- unifi.uap.stat_guest-rx_frags
- unifi.uap.stat_guest-rx_packets
- unifi.uap.stat_guest-tx_bytes
- unifi.uap.stat_guest-tx_dropped
- unifi.uap.stat_guest-tx_errors
- unifi.uap.stat_guest-tx_packets
- unifi.uap.stat_guest-tx_retries
- unifi.uap.stat_rx_bytes
- unifi.uap.stat_rx_crypts
- unifi.uap.stat_rx_dropped
- unifi.uap.stat_rx_errors
- unifi.uap.stat_rx_frags
- unifi.uap.stat_rx_packets
- unifi.uap.stat_tx_bytes
- unifi.uap.stat_tx_dropped
- unifi.uap.stat_tx_errors
- unifi.uap.stat_tx_packets
- unifi.uap.stat_user-rx_bytes
- unifi.uap.stat_user-rx_crypts
- unifi.uap.stat_user-rx_dropped
- unifi.uap.stat_user-rx_errors
- unifi.uap.stat_user-rx_frags
- unifi.uap.stat_user-rx_packets
- unifi.uap.stat_user-tx_bytes
- unifi.uap.stat_user-tx_dropped
- unifi.uap.stat_user-tx_errors
- unifi.uap.stat_user-tx_packets
- unifi.uap.stat_user-tx_retries
- unifi.uap.state
- unifi.uap.sys
- unifi.uap.system_uptime
- unifi.uap.tx_bytes
- unifi.uap.upgradeable
- unifi.uap.uptime
- unifi.uap.user_num_sta
- unifi.uap_radios.ast_be_xmit
- unifi.uap_radios.channel
- unifi.uap_radios.cu_self_rx
- unifi.uap_radios.cu_self_tx
- unifi.uap_radios.cu_total
- unifi.uap_radios.current_antenna_gain
- unifi.uap_radios.ext_channel
- unifi.uap_radios.gain
- unifi.uap_radios.guest_num_sta
- unifi.uap_radios.max_txpower
- unifi.uap_radios.min_txpower
- unifi.uap_radios.nss
- unifi.uap_radios.num_sta
- unifi.uap_radios.radio_caps
- unifi.uap_radios.tx_packets
- unifi.uap_radios.tx_power
- unifi.uap_radios.tx_retries
- unifi.uap_radios.user_num_sta
- unifi.uap_rogue.age
- unifi.uap_rogue.bw
- unifi.uap_rogue.center_freq
- unifi.uap_rogue.channel
- unifi.uap_rogue.freq
- unifi.uap_rogue.noise
- unifi.uap_rogue.rssi
- unifi.uap_rogue.rssi_age
- unifi.uap_rogue.signal
- unifi.uap_vaps.avg_client_signal
- unifi.uap_vaps.ccq
- unifi.uap_vaps.channel
- unifi.uap_vaps.mac_filter_rejections
- unifi.uap_vaps.num_satisfaction_sta
- unifi.uap_vaps.num_sta
- unifi.uap_vaps.rx_bytes
- unifi.uap_vaps.rx_crypts
- unifi.uap_vaps.rx_dropped
- unifi.uap_vaps.rx_errors
- unifi.uap_vaps.rx_frags
- unifi.uap_vaps.rx_nwids
- unifi.uap_vaps.rx_packets
- unifi.uap_vaps.rx_tcp_goodbytes
- unifi.uap_vaps.rx_tcp_lat_avg
- unifi.uap_vaps.rx_tcp_lat_max
- unifi.uap_vaps.rx_tcp_lat_min
- unifi.uap_vaps.satisfaction
- unifi.uap_vaps.satisfaction_now
- unifi.uap_vaps.tx_bytes
- unifi.uap_vaps.tx_combined_retries
- unifi.uap_vaps.tx_data_mpdu_bytes
- unifi.uap_vaps.tx_dropped
- unifi.uap_vaps.tx_errors
- unifi.uap_vaps.tx_packets
- unifi.uap_vaps.tx_power
- unifi.uap_vaps.tx_retries
- unifi.uap_vaps.tx_rts_retries
- unifi.uap_vaps.tx_success
- unifi.uap_vaps.tx_tcp_goodbytes
- unifi.uap_vaps.tx_tcp_lat_avg
- unifi.uap_vaps.tx_tcp_lat_max
- unifi.uap_vaps.tx_tcp_lat_min
- unifi.uap_vaps.tx_total
- unifi.uap_vaps.wifi_tx_latency_mov_avg
- unifi.uap_vaps.wifi_tx_latency_mov_cuont
- unifi.uap_vaps.wifi_tx_latency_mov_max
- unifi.uap_vaps.wifi_tx_latency_mov_min
- unifi.uap_vaps.wifi_tx_latency_mov_total
- unifi.usg.bytes
- unifi.usg.cpu
- unifi.usg.guest_num_sta
- unifi.usg.lan_rx_bytes
- unifi.usg.lan_rx_dropped
- unifi.usg.lan_rx_packets
- unifi.usg.lan_tx_bytes
- unifi.usg.lan_tx_packets
- unifi.usg.last_seen
- unifi.usg.loadavg_1
- unifi.usg.loadavg_15
- unifi.usg.loadavg_5
- unifi.usg.mem
- unifi.usg.mem_buffer
- unifi.usg.mem_total
- unifi.usg.mem_used
- unifi.usg.memory
- unifi.usg.network
- unifi.usg.networks.num_sta
- unifi.usg.networks.rx_bytes
- unifi.usg.networks.rx_packets
- unifi.usg.networks.tx_bytes
- unifi.usg.networks.tx_packets
- unifi.usg.num_desktop
- unifi.usg.num_handheld
- unifi.usg.num_mobile
- unifi.usg.probe
- unifi.usg.rx_bytes
- unifi.usg.speedtest_status_latency
- unifi.usg.speedtest_status_ping
- unifi.usg.speedtest_status_rundate
- unifi.usg.speedtest_status_runtime
- unifi.usg.speedtest_status_xput_download
- unifi.usg.speedtest_status_xput_upload
- unifi.usg.state
- unifi.usg.storage_bar_pct
- unifi.usg.storage_bar_size
- unifi.usg.storage_bar_used
- unifi.usg.storage_foo_pct
- unifi.usg.storage_foo_size
- unifi.usg.storage_foo_used
- unifi.usg.sys
- unifi.usg.system_uptime
- unifi.usg.temp_cpu
- unifi.usg.temp_sys
- unifi.usg.tx_bytes
- unifi.usg.upgradeable
- unifi.usg.uplink_latency
- unifi.usg.uplink_speed
- unifi.usg.uptime
- unifi.usg.user_num_sta
- unifi.usg.wan_ports.bytes_r
- unifi.usg.wan_ports.full_duplex
- unifi.usg.wan_ports.max_speed
- unifi.usg.wan_ports.rx_broadcast
- unifi.usg.wan_ports.rx_bytes
- unifi.usg.wan_ports.rx_bytes_r
- unifi.usg.wan_ports.rx_dropped
- unifi.usg.wan_ports.rx_errors
- unifi.usg.wan_ports.rx_multicast
- unifi.usg.wan_ports.rx_packets
- unifi.usg.wan_ports.speed
- unifi.usg.wan_ports.tx_broadcast
- unifi.usg.wan_ports.tx_bytes
- unifi.usg.wan_ports.tx_bytes_r
- unifi.usg.wan_ports.tx_dropped
- unifi.usg.wan_ports.tx_errors
- unifi.usg.wan_ports.tx_multicast
- unifi.usg.wan_ports.tx_packets
- unifi.usw.bytes
- unifi.usw.guest_num_sta
- unifi.usw.last_seen
- unifi.usw.ports.bytes_r
- unifi.usw.ports.poe_current
- unifi.usw.ports.poe_power
- unifi.usw.ports.poe_voltage
- unifi.usw.ports.rx_broadcast
- unifi.usw.ports.rx_bytes
- unifi.usw.ports.rx_bytes_r
- unifi.usw.ports.rx_dropped
- unifi.usw.ports.rx_errors
- unifi.usw.ports.rx_multicast
- unifi.usw.ports.rx_packets
- unifi.usw.ports.sfp_current
- unifi.usw.ports.sfp_rx_power
- unifi.usw.ports.sfp_temperature
- unifi.usw.ports.sfp_tx_power
- unifi.usw.ports.sfp_voltage
- unifi.usw.ports.speed
- unifi.usw.ports.stp_path_cost
- unifi.usw.ports.tx_broadcast
- unifi.usw.ports.tx_bytes
- unifi.usw.ports.tx_bytes_r
- unifi.usw.ports.tx_dropped
- unifi.usw.ports.tx_errors
- unifi.usw.ports.tx_multicast
- unifi.usw.ports.tx_packets
- unifi.usw.rx_bytes
- unifi.usw.stat_bytes
- unifi.usw.stat_rx_bytes
- unifi.usw.stat_rx_crypts
- unifi.usw.stat_rx_dropped
- unifi.usw.stat_rx_errors
- unifi.usw.stat_rx_frags
- unifi.usw.stat_rx_packets
- unifi.usw.stat_tx_bytes
- unifi.usw.stat_tx_dropped
- unifi.usw.stat_tx_errors
- unifi.usw.stat_tx_packets
- unifi.usw.stat_tx_retries
- unifi.usw.tx_bytes
- unifi.usw.upgradeable
- unifi.usw.uptime
counts:
- unifi.collector.num_devices
- unifi.collector.num_errors
- unifi.sitedpi.tx_packets
- unifi.sitedpi.rx_packets
- unifi.collector.num_sites
- unifi.collect.success
- unifi.collector.num_rogue_ap
- unifi.collector.num_sites_dpi
- unifi.collector.num_clients_dpi
- unifi.sitedpi.tx_bytes
- unifi.sitedpi.rx_bytes
- unifi.collector.num_clients
timings:
- unifi.collector_timing
- unifi.collector.elapsed_time
histograms: []
distributions: []
sets: []
service_checks: []

View File

@ -45,7 +45,9 @@ func (u *DatadogUnifi) batchPDU(r report, s *unifi.PDU) {
"user_num_sta": s.UserNumSta.Val,
"upgradeable": boolToFloat64(s.Upgradeable.Val),
})
r.addCount(pduT)
metricName := metricNamespace("pdu")
reportGaugeForFloat64Map(r, metricName, data, tags)

View File

@ -14,6 +14,7 @@ func tagMapToTags(tagMap map[string]string) []string {
for k, v := range tagMap {
tags = append(tags, tag(k, v))
}
return tags
}
@ -22,6 +23,7 @@ func tagMapToSimpleStrings(tagMap map[string]string) string {
for k, v := range tagMap {
result = fmt.Sprintf("%s%s=\"%v\", ", result, k, v)
}
return strings.TrimRight(result, ", ")
}
@ -52,5 +54,6 @@ func boolToFloat64(v bool) float64 {
if v {
return 1.0
}
return 0.0
}

View File

@ -111,6 +111,7 @@ func (r *Report) reportEvent(title string, date time.Time, message string, tags
if date.IsZero() {
date = time.Now()
}
return r.client.Event(&statsd.Event{
Title: title,
Text: message,

View File

@ -1,6 +1,8 @@
package datadogunifi
import (
"strings"
"github.com/unpoller/unifi"
)
@ -46,10 +48,6 @@ func (u *DatadogUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
// batchUAP generates Wireless-Access-Point datapoints for Datadog.
// These points can be passed directly to datadog.
func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := cleanTags(map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
@ -61,7 +59,10 @@ func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
"type": s.Type,
"ip": s.IP,
})
data := CombineFloat64(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
data := CombineFloat64(
u.processUAPstats(s.Stat.Ap),
u.batchSysStats(s.SysStats, s.SystemStats),
)
data["bytes"] = s.Bytes.Val
data["last_seen"] = s.LastSeen.Val
data["rx_bytes"] = s.RxBytes.Val
@ -70,7 +71,9 @@ func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
data["user_num_sta"] = s.UserNumSta.Val
data["guest_num_sta"] = s.GuestNumSta.Val
data["num_sta"] = s.NumSta.Val
data["upgradeable"] = boolToFloat64(s.Upgradable.Val)
data["upgradeable"] = s.Upgradable.Float64()
data["adopted"] = s.Adopted.Float64()
data["locating"] = s.Locating.Float64()
r.addCount(uapT)
@ -210,7 +213,7 @@ func (u *DatadogUnifi) processRadTable(r report, t map[string]string, rt unifi.R
}
for _, t := range rts {
if t.Name == p.Name {
if strings.EqualFold(t.Name, p.Name) {
data["ast_be_xmit"] = t.AstBeXmit.Val
data["channel"] = t.Channel.Val
data["cu_self_rx"] = t.CuSelfRx.Val

View File

@ -1,7 +1,7 @@
package datadogunifi
import (
"strconv"
"regexp"
"strings"
"github.com/unpoller/unifi"
@ -36,6 +36,12 @@ func CombineFloat64(in ...map[string]float64) map[string]float64 {
return out
}
var statNameRe = regexp.MustCompile("[^a-zA-Z0-9_]")
func safeStatsName(s string) string {
return statNameRe.ReplaceAllString(strings.ToLower(s), "_")
}
// batchSysStats is used by all device types.
func (u *DatadogUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]float64 {
m := map[string]float64{
@ -51,11 +57,11 @@ func (u *DatadogUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map
}
for k, v := range ss.Temps {
temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
temp := v.Celsius()
k = safeStatsName(k)
if temp != 0 && k != "" {
m["temp_"+strings.ToLower(k)] = float64(temp)
m[k] = float64(temp)
}
}
@ -66,7 +72,11 @@ func (u *DatadogUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]float
output := make(map[string]float64)
for _, t := range temps {
output["temp_"+t.Name] = t.Value
if t.Name == "" {
continue
}
output[safeStatsName("temp_"+t.Name)] = t.Value
}
return output
@ -76,13 +86,17 @@ func (u *DatadogUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]floa
output := make(map[string]float64)
for _, t := range storage {
output["storage_"+t.Name+"_size"] = t.Size.Val
output["storage_"+t.Name+"_used"] = t.Used.Val
if t.Name == "" {
continue
}
output[safeStatsName("storage_"+t.Name+"_size")] = t.Size.Val
output[safeStatsName("storage_"+t.Name+"_used")] = t.Used.Val
if t.Size.Val != 0 && t.Used.Val != 0 && t.Used.Val < t.Size.Val {
output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
output[safeStatsName("storage_"+t.Name+"_pct")] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
} else {
output["storage_"+t.Name+"_pct"] = 0
output[safeStatsName("storage_"+t.Name+"_pct")] = 0
}
}
@ -130,7 +144,9 @@ func (u *DatadogUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
)
r.addCount(udmT)
metricName := metricNamespace("usg")
reportGaugeForFloat64Map(r, metricName, data, tags)
u.batchNetTable(r, tags, s.NetworkTable)

View File

@ -49,6 +49,7 @@ func (u *DatadogUnifi) batchUSG(r report, s *unifi.USG) {
r.addCount(usgT)
metricName := metricNamespace("usg")
reportGaugeForFloat64Map(r, metricName, data, tags)
u.batchNetTable(r, tags, s.NetworkTable)
@ -101,6 +102,7 @@ func (u *DatadogUnifi) batchUSGwans(r report, tags map[string]string, wans ...un
if wan.FullDuplex.Val {
fullDuplex = 1.0
}
data := map[string]float64{
"bytes_r": wan.BytesR.Val,
"full_duplex": fullDuplex,

View File

@ -43,7 +43,9 @@ func (u *DatadogUnifi) batchUSW(r report, s *unifi.USW) {
})
r.addCount(uswT)
metricName := metricNamespace("usw")
reportGaugeForFloat64Map(r, metricName, data, tags)
u.batchPortTable(r, tags, s.PortTable)

View File

@ -26,14 +26,17 @@ func (u *DatadogUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
"ip": s.IP,
"license_state": s.LicenseState,
})
var gw *unifi.Gw = nil
var gw *unifi.Gw
if s.Stat != nil {
gw = s.Stat.Gw
}
var sw *unifi.Sw = nil
var sw *unifi.Sw
if s.Stat != nil {
sw = s.Stat.Sw
}
data := CombineFloat64(
u.batchUDMstorage(s.Storage),
u.batchUDMtemps(s.Temperatures),

View File

@ -33,6 +33,7 @@ func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
"channel": s.Channel.Txt,
"vlan": s.Vlan.Txt,
}
fields := map[string]any{
"anomalies": s.Anomalies.Int64(),
"ip": s.IP,
@ -80,6 +81,7 @@ func (u *InfluxUnifi) batchClientDPI(r report, v any, appTotal, catTotal totalsD
s, ok := v.(*unifi.DPITable)
if !ok {
u.LogErrorf("invalid type given to batchClientDPI: %T", v)
return
}

View File

@ -71,8 +71,8 @@ type InfluxDB struct {
// InfluxUnifi is returned by New() after you provide a Config.
type InfluxUnifi struct {
Collector poller.Collect
influxV1 influxV1.Client
influxV2 influx.Client
InfluxV1Client influxV1.Client
InfluxV2Client influx.Client
LastCheck time.Time
IsVersion2 bool
*InfluxDB
@ -103,43 +103,54 @@ func (u *InfluxUnifi) PollController() {
interval := u.Interval.Round(time.Second)
ticker := time.NewTicker(interval)
version := "1"
if u.IsVersion2 {
version = "2"
}
u.Logf("Poller->InfluxDB started, version: %s, interval: %v, dp: %v, db: %s, url: %s, bucket: %s, org: %s",
version, interval, u.DeadPorts, u.DB, u.URL, u.Bucket, u.Org)
for u.LastCheck = range ticker.C {
u.Poll(interval)
}
}
func (u *InfluxUnifi) Poll(interval time.Duration) {
metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
if err != nil {
u.LogErrorf("metric fetch for InfluxDB failed: %v", err)
continue
return
}
events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
if err != nil {
u.LogErrorf("event fetch for InfluxDB failed: %v", err)
continue
return
}
report, err := u.ReportMetrics(metrics, events)
if err != nil {
// XXX: reset and re-auth? not sure..
u.LogErrorf("%v", err)
continue
return
}
u.Logf("UniFi Metrics Recorded. %v", report)
}
}
func (u *InfluxUnifi) Enabled() bool {
if u == nil {
return false
}
if u.Config == nil {
return false
}
return !u.Disable
}
@ -147,10 +158,13 @@ func (u *InfluxUnifi) DebugOutput() (bool, error) {
if u == nil {
return true, nil
}
if !u.Enabled() {
return true, nil
}
u.setConfigDefaults()
_, err := url.Parse(u.Config.URL)
if err != nil {
return false, fmt.Errorf("invalid influx URL: %v", err)
@ -160,18 +174,21 @@ func (u *InfluxUnifi) DebugOutput() (bool, error) {
// we're a version 2
tlsConfig := &tls.Config{InsecureSkipVerify: !u.VerifySSL} // nolint: gosec
serverOptions := influx.DefaultOptions().SetTLSConfig(tlsConfig).SetBatchSize(u.BatchSize)
u.influxV2 = influx.NewClientWithOptions(u.URL, u.AuthToken, serverOptions)
u.InfluxV2Client = influx.NewClientWithOptions(u.URL, u.AuthToken, serverOptions)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
ok, err := u.influxV2.Ping(ctx)
ok, err := u.InfluxV2Client.Ping(ctx)
if err != nil {
return false, err
}
if !ok {
return false, fmt.Errorf("unsuccessful ping to influxdb2")
}
} else {
u.influxV1, err = influxV1.NewHTTPClient(influxV1.HTTPConfig{
u.InfluxV1Client, err = influxV1.NewHTTPClient(influxV1.HTTPConfig{
Addr: u.URL,
Username: u.User,
Password: u.Pass,
@ -180,19 +197,23 @@ func (u *InfluxUnifi) DebugOutput() (bool, error) {
if err != nil {
return false, fmt.Errorf("making client: %w", err)
}
_, _, err = u.influxV1.Ping(time.Second * 2)
_, _, err = u.InfluxV1Client.Ping(time.Second * 2)
if err != nil {
return false, fmt.Errorf("unsuccessful ping to influxdb1")
}
}
return true, nil
}
// Run runs a ticker to poll the unifi server and update influxdb.
func (u *InfluxUnifi) Run(c poller.Collect) error {
u.Collector = c
if !u.Enabled() {
u.LogDebugf("InfluxDB config missing (or disabled), InfluxDB output disabled!")
return nil
}
@ -205,6 +226,7 @@ func (u *InfluxUnifi) Run(c poller.Collect) error {
_, err = url.Parse(u.Config.URL)
if err != nil {
u.LogErrorf("invalid influx URL: %v", err)
return err
}
@ -212,9 +234,9 @@ func (u *InfluxUnifi) Run(c poller.Collect) error {
// we're a version 2
tlsConfig := &tls.Config{InsecureSkipVerify: !u.VerifySSL} // nolint: gosec
serverOptions := influx.DefaultOptions().SetTLSConfig(tlsConfig).SetBatchSize(u.BatchSize)
u.influxV2 = influx.NewClientWithOptions(u.URL, u.AuthToken, serverOptions)
u.InfluxV2Client = influx.NewClientWithOptions(u.URL, u.AuthToken, serverOptions)
} else {
u.influxV1, err = influxV1.NewHTTPClient(influxV1.HTTPConfig{
u.InfluxV1Client, err = influxV1.NewHTTPClient(influxV1.HTTPConfig{
Addr: u.URL,
Username: u.User,
Password: u.Pass,
@ -310,7 +332,7 @@ func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Repor
if u.IsVersion2 {
// Make a new Influx Points Batcher.
r.writer = u.influxV2.WriteAPI(u.Org, u.Bucket)
r.writer = u.InfluxV2Client.WriteAPI(u.Org, u.Bucket)
go u.collect(r, r.ch)
// Batch all the points.
@ -335,7 +357,7 @@ func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Repor
r.wg.Wait() // wait for all points to finish batching!
// Send all the points.
if err = u.influxV1.Write(r.bp); err != nil {
if err = u.InfluxV1Client.Write(r.bp); err != nil {
return nil, fmt.Errorf("influxdb.Write(points): %w", err)
}
}
@ -363,6 +385,7 @@ func (u *InfluxUnifi) collect(r report, ch chan *metric) {
r.error(err)
}
r.done()
}
}

View File

@ -0,0 +1,257 @@
package influxunifi_test
import (
"fmt"
"log"
"os"
"sort"
"testing"
"time"
influxV1Models "github.com/influxdata/influxdb1-client/models"
influxV1 "github.com/influxdata/influxdb1-client/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/unpoller/unpoller/pkg/influxunifi"
"github.com/unpoller/unpoller/pkg/testutil"
"golift.io/cnfg"
"gopkg.in/yaml.v3"
)
// errNotImplemented is returned by the mock client methods the
// integration test never exercises (Query / QueryAsChunk).
var errNotImplemented = fmt.Errorf("not implemented")

// mockInfluxV1Client is an in-memory stand-in for the InfluxDB v1 client.
// Instead of sending batches over the network, it records which databases
// were written to and, per point name, the union of tag keys and the
// declared type of every field, so the test can diff the capture against
// the golden expectations file.
type mockInfluxV1Client struct {
	databases map[string]bool       // set of database names seen in Write
	points    map[string]foundPoint // point name -> accumulated tags/fields
}

// foundPoint accumulates, for a single point name, the tag key/value pairs
// and the field-name -> field-type-name pairs observed across all writes.
type foundPoint struct {
	Tags   map[string]string `json:"tags"`
	Fields map[string]string `json:"fields"`
}
// newMockInfluxV1Client returns an empty capture client ready to record
// the Write calls made by the poller under test.
func newMockInfluxV1Client() *mockInfluxV1Client {
	return &mockInfluxV1Client{
		// A zero size hint is meaningless; plain make is idiomatic here.
		databases: make(map[string]bool),
		points:    make(map[string]foundPoint),
	}
}
// toTestData converts the captured writes into the same shape as the
// golden expectations file: a sorted list of database names and, per
// point, the sorted tag keys plus the field-name -> field-type map.
func (m *mockInfluxV1Client) toTestData() testExpectations {
	databases := make([]string, 0, len(m.databases))
	for name := range m.databases {
		databases = append(databases, name)
	}

	sort.Strings(databases)

	points := make(map[string]testPointExpectation, len(m.points))

	for name, captured := range m.points {
		tagKeys := make([]string, 0, len(captured.Tags))
		for key := range captured.Tags {
			tagKeys = append(tagKeys, key)
		}

		sort.Strings(tagKeys)

		points[name] = testPointExpectation{
			Tags:   tagKeys,
			Fields: captured.Fields,
		}
	}

	return testExpectations{
		Databases: databases,
		Points:    points,
	}
}
// Ping satisfies the influx v1 client interface with a canned healthy
// response; the mock performs no network I/O, so it can never block.
func (m *mockInfluxV1Client) Ping(_ time.Duration) (time.Duration, string, error) {
	const fakeLatency = time.Millisecond

	return fakeLatency, "", nil
}
// influxV1FieldTypeToString renders an influx v1 field type as the short
// type name used in the golden expectations file ("int", "float", "bool",
// "none", "string", "uint"). Any unrecognized type maps to "unknown".
func influxV1FieldTypeToString(ft influxV1Models.FieldType) string {
	switch ft {
	case influxV1Models.Float:
		return "float"
	case influxV1Models.Integer:
		return "int"
	case influxV1Models.Unsigned:
		return "uint"
	case influxV1Models.Boolean:
		return "bool"
	case influxV1Models.String:
		return "string"
	case influxV1Models.Empty:
		return "none"
	default:
		return "unknown"
	}
}
// Write records a batch of points in memory instead of sending it to
// InfluxDB. It marks the target database as seen and, for each point,
// merges the tag keys and field types into the per-point-name capture.
//
// Fix: the original discarded the error from influxV1Models.NewPoint
// (`point, _ :=`) and then called point.FieldIterator(), which would
// dereference a nil pointer on a malformed point; we now skip such
// points. The `fieldType != ""` / `== ""` branches were also dead code —
// influxV1FieldTypeToString never returns "" (its default is "unknown") —
// so the assignment collapses to a single last-writer-wins overwrite,
// which is exactly what the original branches did.
func (m *mockInfluxV1Client) Write(bp influxV1.BatchPoints) error {
	m.databases[bp.Database()] = true

	for _, p := range bp.Points() {
		if existing, ok := m.points[p.Name()]; !ok {
			m.points[p.Name()] = foundPoint{
				Tags:   p.Tags(),
				Fields: make(map[string]string),
			}
		} else {
			// foundPoint's map fields are shared between the struct copy
			// and the stored value, so mutating existing.Tags updates the
			// capture in place.
			for k, v := range p.Tags() {
				existing.Tags[k] = v
			}
		}

		fields, err := p.Fields()
		if err != nil {
			continue // best-effort capture: skip unreadable field sets
		}

		// Re-model the point so we can iterate fields with their types.
		point, err := influxV1Models.NewPoint(p.Name(), influxV1Models.NewTags(p.Tags()), fields, p.Time())
		if err != nil {
			continue // skip malformed points instead of panicking below
		}

		fieldIter := point.FieldIterator()
		for fieldIter.Next() {
			fieldName := string(fieldIter.FieldKey())
			m.points[p.Name()].Fields[fieldName] = influxV1FieldTypeToString(fieldIter.Type())
		}
	}

	return nil
}
// Query is not exercised by the integration test; it always fails.
func (m *mockInfluxV1Client) Query(_ influxV1.Query) (*influxV1.Response, error) {
	return nil, errNotImplemented
}

// QueryAsChunk is not exercised by the integration test; it always fails.
func (m *mockInfluxV1Client) QueryAsChunk(_ influxV1.Query) (*influxV1.ChunkedResponse, error) {
	return nil, errNotImplemented
}

// Close is a no-op; the mock holds no connections or other resources.
func (m *mockInfluxV1Client) Close() error {
	return nil
}
// testPointExpectation describes one point in the golden file: the tag
// keys it must carry and the expected type name of each field.
//
// NOTE(review): this data is decoded with gopkg.in/yaml.v3, which ignores
// `json` struct tags; decoding only worked via yaml.v3's default
// field-name lowercasing. Explicit `yaml` tags are added so the mapping
// is intentional; they produce the exact same keys, so behavior is
// unchanged.
type testPointExpectation struct {
	Tags   []string          `json:"tags"   yaml:"tags"`
	Fields map[string]string `json:"fields" yaml:"fields"`
}

// testExpectations mirrors integration_test_expectations.yaml: the set of
// databases written plus the per-point tag/field expectations.
type testExpectations struct {
	Databases []string                        `json:"databases" yaml:"databases"`
	Points    map[string]testPointExpectation `json:"points"    yaml:"points"`
}
// TestInfluxV1Integration runs a full poll cycle against the mock UniFi
// controller (testutil rig) with the InfluxDB v1 client replaced by an
// in-memory capture, then diffs everything that was "written" — database
// names, point names, tag keys, field keys, and field types — against the
// golden expectations file. Field-type drift is treated as a
// backwards-compatibility break.
func TestInfluxV1Integration(t *testing.T) {
	// load test expectations file
	yamlFile, err := os.ReadFile("integration_test_expectations.yaml")
	require.NoError(t, err)

	var testExpectationsData testExpectations

	err = yaml.Unmarshal(yamlFile, &testExpectationsData)
	require.NoError(t, err)

	// Spin up the mock controller and wire the poller to the capture
	// client instead of a real InfluxDB.
	testRig := testutil.NewTestSetup(t)
	defer testRig.Close()

	mockCapture := newMockInfluxV1Client()

	u := influxunifi.InfluxUnifi{
		Collector:      testRig.Collector,
		IsVersion2:     false,
		InfluxV1Client: mockCapture,
		InfluxDB: &influxunifi.InfluxDB{
			Config: &influxunifi.Config{
				DB:       "unpoller",
				URL:      testRig.MockServer.Server.URL,
				Interval: cnfg.Duration{Duration: time.Hour},
			},
		},
	}

	testRig.Initialize()

	// One poll populates mockCapture with every point the plugin emits.
	u.Poll(time.Minute)

	// databases
	assert.Len(t, mockCapture.databases, 1)

	expectedKeys := testutil.NewSetFromSlice[string](testExpectationsData.Databases)
	foundKeys := testutil.NewSetFromMap[string](mockCapture.databases)
	additions, deletions := expectedKeys.Difference(foundKeys)
	assert.Len(t, additions, 0)
	assert.Len(t, deletions, 0)

	// point names
	assert.Len(t, testutil.NewSetFromMap[string](mockCapture.points).Slice(), len(testExpectationsData.Points))

	expectedKeys = testutil.NewSetFromMap[string](testExpectationsData.Points)
	foundKeys = testutil.NewSetFromMap[string](mockCapture.points)
	additions, deletions = expectedKeys.Difference(foundKeys)
	assert.Len(t, additions, 0)
	assert.Len(t, deletions, 0)

	// validate tags and fields per point
	// (sorted so failures are reported in a deterministic order)
	pointNames := testutil.NewSetFromMap[string](testExpectationsData.Points).Slice()
	sort.Strings(pointNames)

	for _, pointName := range pointNames {
		expectedContent := testExpectationsData.Points[pointName]
		foundContent := mockCapture.points[pointName]

		// check tags left intact
		expectedKeys = testutil.NewSetFromSlice[string](expectedContent.Tags)
		foundKeys = testutil.NewSetFromMap[string](foundContent.Tags)
		additions, deletions = expectedKeys.Difference(foundKeys)
		assert.Len(t, additions, 0, "point \"%s\" found the following tag keys have a difference!: additions=%+v", pointName, additions)
		assert.Len(t, deletions, 0, "point \"%s\" found the following tag keys have a difference!: deletions=%+v", pointName, deletions)

		// check field keys intact
		expectedKeys = testutil.NewSetFromMap[string](expectedContent.Fields)
		foundKeys = testutil.NewSetFromMap[string](foundContent.Fields)
		additions, deletions = expectedKeys.Difference(foundKeys)
		assert.Len(
			t,
			additions,
			0,
			"point \"%s\" found the following field keys have a difference!: additions=%+v foundKeys=%+v",
			pointName,
			additions,
			foundKeys.Slice(),
		)
		assert.Len(
			t,
			deletions,
			0,
			"point \"%s\" found the following field keys have a difference!: deletions=%+v foundKeys=%+v",
			pointName,
			deletions,
			foundKeys.Slice(),
		)

		// check field types
		fieldNames := testutil.NewSetFromMap[string](expectedContent.Fields).Slice()
		sort.Strings(fieldNames)

		for _, fieldName := range fieldNames {
			expectedFieldType := expectedContent.Fields[fieldName]
			foundFieldType := foundContent.Fields[fieldName]
			assert.Equal(t, expectedFieldType, foundFieldType, "point \"%s\" field \"%s\" had a difference in declared type \"%s\" vs \"%s\", this is not safe for backwards compatibility", pointName, fieldName, expectedFieldType, foundFieldType)
		}
	}

	// Dump the capture in golden-file format so a maintainer can refresh
	// integration_test_expectations.yaml after an intentional change.
	capturedTestData := mockCapture.toTestData()
	buf, _ := yaml.Marshal(&capturedTestData)
	log.Println("generated expectation yaml:\n" + string(buf))
}

View File

@ -0,0 +1,633 @@
databases:
- unpoller
points:
clientdpi:
tags:
- application
- category
- mac
- name
- site_name
- source
fields:
rx_bytes: int
rx_packets: int
tx_bytes: int
tx_packets: int
clients:
tags:
- channel
- dev_cat
- dev_family
- dev_id
- dev_vendor
- fixed_ip
- is_guest
- is_wired
- mac
- name
- os_class
- os_name
- oui
- radio
- radio_name
- radio_proto
- site_name
- source
- sw_port
- use_fixedip
- vlan
fields:
anomalies: int
bssid: string
bytes_r: int
ccq: int
channel: float
essid: string
hostname: string
ip: string
noise: int
note: string
powersave_enabled: string
radio_desc: string
roam_count: int
rssi: int
rx_bytes: int
rx_bytes_r: int
rx_packets: int
rx_rate: int
satisfaction: float
signal: int
tx_bytes: int
tx_bytes_r: int
tx_packets: int
tx_power: int
tx_rate: int
tx_retries: int
uptime: int
wifi_tx_attempts: int
wired-rx_bytes: int
wired-rx_bytes-r: int
wired-rx_packets: int
wired-tx_bytes: int
wired-tx_bytes-r: int
wired-tx_packets: int
sitedpi:
tags:
- application
- category
- site_name
- source
fields:
rx_bytes: int
rx_packets: int
tx_bytes: int
tx_packets: int
subsystems:
tags:
- desc
- gw_name
- lan_ip
- name
- site_name
- source
- status
- subsystem
- wan_ip
fields:
drops: float
gw_cpu: float
gw_mem: float
gw_uptime: float
latency: float
num_adopted: float
num_ap: float
num_disabled: float
num_disconnected: float
num_guest: float
num_gw: float
num_iot: float
num_new_alarms: float
num_pending: float
num_sta: float
num_sw: float
num_user: float
remote_user_num_active: float
remote_user_num_inactive: float
remote_user_rx_bytes: float
remote_user_rx_packets: float
remote_user_tx_bytes: float
remote_user_tx_packets: float
rx_bytes-r: int
speedtest_lastrun: float
speedtest_ping: float
tx_bytes-r: int
uptime: int
wan_ip: string
xput_down: float
xput_up: float
uap:
tags:
- mac
- model
- name
- serial
- site_name
- source
- type
- version
fields:
bytes: float
cpu: float
guest-num_sta: int
ip: string
last_seen: float
loadavg_15: float
loadavg_1: float
loadavg_5: float
mem: float
mem_buffer: float
mem_total: float
mem_used: float
num_sta: float
rx_bytes: float
stat_guest-rx_bytes: float
stat_guest-rx_crypts: float
stat_guest-rx_dropped: float
stat_guest-rx_errors: float
stat_guest-rx_frags: float
stat_guest-rx_packets: float
stat_guest-tx_bytes: float
stat_guest-tx_dropped: float
stat_guest-tx_errors: float
stat_guest-tx_packets: float
stat_guest-tx_retries: float
stat_rx_bytes: float
stat_rx_crypts: float
stat_rx_dropped: float
stat_rx_errors: float
stat_rx_frags: float
stat_rx_packets: float
stat_tx_bytes: float
stat_tx_dropped: float
stat_tx_errors: float
stat_tx_packets: float
stat_user-rx_bytes: float
stat_user-rx_crypts: float
stat_user-rx_dropped: float
stat_user-rx_errors: float
stat_user-rx_frags: float
stat_user-rx_packets: float
stat_user-tx_bytes: float
stat_user-tx_dropped: float
stat_user-tx_errors: float
stat_user-tx_packets: float
stat_user-tx_retries: float
state: string
system_uptime: float
temp_cpu: int
temp_memory: int
temp_network: int
temp_probe: int
temp_sys: int
tx_bytes: float
upgradeable: bool
uptime: float
user-num_sta: int
uap_radios:
tags:
- channel
- device_name
- radio
- site_name
- source
fields:
ast_be_xmit: float
channel: float
cu_self_rx: float
cu_self_tx: float
cu_total: float
current_antenna_gain: float
extchannel: float
gain: float
guest-num_sta: float
ht: string
max_txpower: float
min_txpower: float
nss: float
num_sta: float
radio: string
radio_caps: float
tx_packets: float
tx_power: float
tx_retries: float
user-num_sta: float
uap_rogue:
tags:
- ap_mac
- band
- mac
- name
- oui
- radio
- radio_name
- security
- site_name
- source
fields:
age: float
bw: float
center_freq: float
channel: int
freq: float
noise: float
rssi: float
rssi_age: float
signal: float
uap_vaps:
tags:
- ap_mac
- bssid
- device_name
- essid
- id
- is_guest
- name
- radio
- radio_name
- site_id
- site_name
- source
- state
- usage
fields:
avg_client_signal: float
ccq: int
channel: float
mac_filter_rejections: int
num_satisfaction_sta: float
num_sta: int
rx_bytes: float
rx_crypts: float
rx_dropped: float
rx_errors: float
rx_frags: float
rx_nwids: float
rx_packets: float
rx_tcp_goodbytes: float
rx_tcp_lat_avg: float
rx_tcp_lat_max: float
rx_tcp_lat_min: float
satisfaction: float
satisfaction_now: float
tx_bytes: float
tx_combined_retries: float
tx_data_mpdu_bytes: float
tx_dropped: float
tx_errors: float
tx_packets: float
tx_power: float
tx_retries: float
tx_rts_retries: float
tx_success: float
tx_tcp_goodbytes: float
tx_tcp_lat_avg: float
tx_tcp_lat_max: float
tx_tcp_lat_min: float
tx_total: float
wifi_tx_latency_mov_avg: float
wifi_tx_latency_mov_cuont: float
wifi_tx_latency_mov_max: float
wifi_tx_latency_mov_min: float
wifi_tx_latency_mov_total: float
unifi_alarm:
tags:
- action
- app_proto
- archived
- catname
- event_type
- in_iface
- key
- proto
- site_name
- source
- subsystem
- usgip
fields:
dest_ip: string
dest_port: int
dst_mac: string
dstip_asn: int
dstip_city: string
dstip_continent_code: string
dstip_country_code: string
dstip_country_name: string
dstip_latitude: float
dstip_longitude: float
dstip_organization: string
host: string
msg: string
src_ip: string
src_mac: string
src_port: int
srcip_asn: int
srcip_city: string
srcip_continent_code: string
srcip_country_code: string
srcip_country_name: string
srcip_latitude: float
srcip_longitude: float
srcip_organization: string
unifi_events:
tags:
- action
- admin
- ap
- ap_from
- ap_name
- ap_to
- app_proto
- catname
- channel
- channel_from
- channel_to
- event_type
- gw
- gw_name
- in_iface
- is_admin
- key
- network
- proto
- radio
- radio_from
- radio_to
- site_name
- source
- ssid
- subsystem
- sw
- sw_name
- usgip
fields:
bytes: float
dest_ip: string
dest_port: int
dst_mac: string
dstip_asn: int
dstip_city: string
dstip_continent_code: string
dstip_country_code: string
dstip_country_name: string
dstip_latitude: float
dstip_longitude: float
dstip_organization: string
duration: float
guest: string
host: string
hostname: string
ip: string
msg: string
src_ip: string
src_mac: string
src_port: int
srcip_asn: int
srcip_city: string
srcip_continent_code: string
srcip_country_code: string
srcip_country_name: string
srcip_latitude: float
srcip_longitude: float
srcip_organization: string
user: string
unifi_ids:
tags:
- action
- app_proto
- archived
- catname
- event_type
- in_iface
- key
- proto
- site_name
- source
- subsystem
- usgip
fields:
dest_ip: string
dest_port: int
dst_mac: string
dstip_asn: int
dstip_city: string
dstip_continent_code: string
dstip_country_code: string
dstip_country_name: string
dstip_latitude: float
dstip_longitude: float
dstip_organization: string
host: string
msg: string
src_ip: string
src_mac: string
src_port: int
srcip_asn: int
srcip_city: string
srcip_continent_code: string
srcip_country_code: string
srcip_country_name: string
srcip_latitude: float
srcip_longitude: float
srcip_organization: string
usg:
tags:
- mac
- model
- name
- serial
- site_name
- source
- type
- version
fields:
bytes: float
cpu: float
guest-num_sta: float
ip: string
lan-rx_bytes: float
lan-rx_dropped: float
lan-rx_packets: float
lan-tx_bytes: float
lan-tx_packets: float
last_seen: float
license_state: string
loadavg_1: float
loadavg_5: float
loadavg_15: float
mem: float
mem_buffer: float
mem_total: float
mem_used: float
num_desktop: float
num_handheld: float
num_mobile: float
rx_bytes: float
source: string
speedtest-status_latency: float
speedtest-status_ping: float
speedtest-status_rundate: float
speedtest-status_runtime: float
speedtest-status_xput_download: float
speedtest-status_xput_upload: float
state: float
storage_bar_pct: int
storage_bar_size: float
storage_bar_used: float
storage_foo_pct: int
storage_foo_size: float
storage_foo_used: float
system_uptime: float
temp_cpu: int
temp_memory: int
temp_network: int
temp_probe: int
temp_sys: int
tx_bytes: float
uplink_latency: float
uplink_name: string
uplink_speed: float
uplink_type: string
upgradeable: bool
uptime: float
user-num_sta: float
version: string
usg_networks:
tags:
- device_name
- domain_name
- enabled
- ip
- is_guest
- mac
- name
- purpose
- site_name
- source
- up
fields:
num_sta: float
rx_bytes: float
rx_packets: float
tx_bytes: float
tx_packets: float
usg_wan_ports:
tags:
- device_name
- enabled
- ifname
- ip
- mac
- purpose
- site_name
- source
- type
- up
fields:
bytes-r: float
full_duplex: bool
gateway: string
is_uplink: bool
max_speed: float
rx_broadcast: float
rx_bytes: float
rx_bytes-r: float
rx_dropped: float
rx_errors: float
rx_multicast: float
rx_packets: float
speed: float
tx_broadcast: float
tx_bytes: float
tx_bytes-r: float
tx_dropped: float
tx_errors: float
tx_multicast: float
tx_packets: float
usw:
tags:
- mac
- model
- name
- serial
- site_name
- source
- type
- version
fields:
bytes: float
guest-num_sta: float
ip: string
last_seen: float
rx_bytes: float
stat_bytes: float
stat_rx_bytes: float
stat_rx_crypts: float
stat_rx_dropped: float
stat_rx_errors: float
stat_rx_frags: float
stat_rx_packets: float
stat_tx_bytes: float
stat_tx_dropped: float
stat_tx_errors: float
stat_tx_packets: float
stat_tx_retries: float
tx_bytes: float
upgradeable: bool
uptime: float
usw_ports:
tags:
- device_name
- flowctrl_rx
- flowctrl_tx
- has_sfp
- media
- name
- poe_enable
- poe_mode
- port_id
- port_idx
- port_poe
- sfp_compliance
- sfp_part
- sfp_serial
- sfp_vendor
- site_name
- source
- type
fields:
dbytes_r: float
poe_current: float
poe_power: float
poe_voltage: float
rx_broadcast: float
rx_bytes: float
rx_bytes-r: float
rx_dropped: float
rx_errors: float
rx_multicast: float
rx_packets: float
sfp_current: float
sfp_rxpower: float
sfp_temperature: float
sfp_txpower: float
sfp_voltage: float
speed: float
stp_pathcost: float
tx_broadcast: float
tx_bytes: float
tx_bytes-r: float
tx_dropped: float
tx_errors: float
tx_multicast: float
tx_packets: float

View File

@ -14,6 +14,7 @@ func (u *InfluxUnifi) Logf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "info"},
})
if u.Collector != nil {
u.Collector.Logf(msg, v...)
}
@ -26,6 +27,7 @@ func (u *InfluxUnifi) LogErrorf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "error"},
})
if u.Collector != nil {
u.Collector.LogErrorf(msg, v...)
}
@ -38,6 +40,7 @@ func (u *InfluxUnifi) LogDebugf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "debug"},
})
if u.Collector != nil {
u.Collector.LogDebugf(msg, v...)
}

View File

@ -61,6 +61,7 @@ func (u *InfluxUnifi) batchSiteDPI(r report, v any) {
s, ok := v.(*unifi.DPITable)
if !ok {
u.LogErrorf("invalid type given to batchSiteDPI: %T", v)
return
}

View File

@ -1,6 +1,8 @@
package influxunifi
import (
"strings"
"github.com/unpoller/unifi"
)
@ -203,7 +205,7 @@ func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.Ra
}
for _, t := range rts {
if t.Name == p.Name {
if strings.EqualFold(t.Name, p.Name) {
fields["ast_be_xmit"] = t.AstBeXmit.Val
fields["channel"] = t.Channel.Val
fields["cu_self_rx"] = t.CuSelfRx.Val

View File

@ -1,7 +1,6 @@
package influxunifi
import (
"strconv"
"strings"
"github.com/unpoller/unifi"
@ -23,6 +22,10 @@ func Combine(in ...map[string]any) map[string]any {
return out
}
func sanitizeName(v string) string {
return strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(v, " ", "_"), ")", ""), "(", ""))
}
// batchSysStats is used by all device types.
func (u *InfluxUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]any {
m := map[string]any{
@ -38,11 +41,10 @@ func (u *InfluxUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[
}
for k, v := range ss.Temps {
temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
temp := v.CelsiusInt64()
if temp != 0 && k != "" {
m["temp_"+strings.ToLower(k)] = temp
m["temp_"+sanitizeName(k)] = temp
}
}
@ -53,7 +55,7 @@ func (u *InfluxUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]any {
output := make(map[string]any)
for _, t := range temps {
output["temp_"+t.Name] = t.Value
output["temp_"+sanitizeName(t.Name)] = t.Value
}
return output
@ -63,13 +65,13 @@ func (u *InfluxUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]any {
output := make(map[string]any)
for _, t := range storage {
output["storage_"+t.Name+"_size"] = t.Size.Val
output["storage_"+t.Name+"_used"] = t.Used.Val
output["storage_"+sanitizeName(t.Name)+"_size"] = t.Size.Val
output["storage_"+sanitizeName(t.Name)+"_used"] = t.Used.Val
if t.Size.Val != 0 && t.Used.Val != 0 && t.Used.Val < t.Size.Val {
output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
output["storage_"+sanitizeName(t.Name)+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
} else {
output["storage_"+t.Name+"_pct"] = 0
output["storage_"+sanitizeName(t.Name)+"_pct"] = 0
}
}

View File

@ -24,14 +24,17 @@ func (u *InfluxUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
"serial": s.Serial,
"type": s.Type,
}
var gw *unifi.Gw = nil
var gw *unifi.Gw
if s.Stat != nil {
gw = s.Stat.Gw
}
var sw *unifi.Sw = nil
var sw *unifi.Sw
if s.Stat != nil {
sw = s.Stat.Sw
}
fields := Combine(
u.batchUDMstorage(s.Storage),
u.batchUDMtemps(s.Temperatures),

View File

@ -11,6 +11,8 @@ import (
/* Event collection. Events are also sent to the webserver for display. */
func (u *InputUnifi) collectControllerEvents(c *Controller) ([]any, error) {
u.LogDebugf("Collecting controller events: %s (%s)", c.URL, c.ID)
if u.isNill(c) {
u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
@ -45,6 +47,8 @@ func (u *InputUnifi) collectControllerEvents(c *Controller) ([]any, error) {
func (u *InputUnifi) collectAlarms(logs []any, sites []*unifi.Site, c *Controller) ([]any, error) {
if *c.SaveAlarms {
u.LogDebugf("Collecting controller alarms: %s (%s)", c.URL, c.ID)
for _, s := range sites {
events, err := c.Unifi.GetAlarmsSite(s)
if err != nil {
@ -69,6 +73,8 @@ func (u *InputUnifi) collectAlarms(logs []any, sites []*unifi.Site, c *Controlle
func (u *InputUnifi) collectAnomalies(logs []any, sites []*unifi.Site, c *Controller) ([]any, error) {
if *c.SaveAnomal {
u.LogDebugf("Collecting controller anomalies: %s (%s)", c.URL, c.ID)
for _, s := range sites {
events, err := c.Unifi.GetAnomaliesSite(s)
if err != nil {
@ -92,6 +98,8 @@ func (u *InputUnifi) collectAnomalies(logs []any, sites []*unifi.Site, c *Contro
func (u *InputUnifi) collectEvents(logs []any, sites []*unifi.Site, c *Controller) ([]any, error) {
if *c.SaveEvents {
u.LogDebugf("Collecting controller site events: %s (%s)", c.URL, c.ID)
for _, s := range sites {
events, err := c.Unifi.GetSiteEvents(s, time.Hour)
if err != nil {
@ -117,6 +125,8 @@ func (u *InputUnifi) collectEvents(logs []any, sites []*unifi.Site, c *Controlle
func (u *InputUnifi) collectIDS(logs []any, sites []*unifi.Site, c *Controller) ([]any, error) {
if *c.SaveIDS {
u.LogDebugf("Collecting controller IDS data: %s (%s)", c.URL, c.ID)
for _, s := range sites {
events, err := c.Unifi.GetIDSSite(s)
if err != nil {
@ -149,6 +159,7 @@ func redactEvent(e *unifi.Event, hash *bool, dropPII *bool) *unifi.Event {
// metrics.Events[i].Msg <-- not sure what to do here.
e.DestIPGeo = unifi.IPGeo{}
e.SourceIPGeo = unifi.IPGeo{}
if *dropPII {
e.Host = ""
e.Hostname = ""

View File

@ -50,6 +50,7 @@ func (u *InputUnifi) dynamicController(filter *poller.Filter) (*poller.Metrics,
if err := u.getUnifi(c); err != nil {
u.logController(c)
return nil, fmt.Errorf("authenticating to %s: %w", filter.Path, err)
}
@ -60,6 +61,8 @@ func (u *InputUnifi) dynamicController(filter *poller.Filter) (*poller.Metrics,
}
func (u *InputUnifi) collectController(c *Controller) (*poller.Metrics, error) {
u.LogDebugf("Collecting controller data: %s (%s)", c.URL, c.ID)
if u.isNill(c) {
u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
@ -84,6 +87,7 @@ func (u *InputUnifi) collectController(c *Controller) (*poller.Metrics, error) {
func (u *InputUnifi) pollController(c *Controller) (*poller.Metrics, error) {
u.RLock()
defer u.RUnlock()
u.LogDebugf("polling controller: %s (%s)", c.URL, c.ID)
// Get the sites we care about.
sites, err := u.getFilteredSites(c)

View File

@ -132,6 +132,7 @@ func (u *InputUnifi) getUnifi(c *Controller) error {
})
if err != nil {
c.Unifi = nil
return fmt.Errorf("unifi controller: %w", err)
}
@ -166,6 +167,7 @@ func (u *InputUnifi) checkSites(c *Controller) error {
if StringInSlice("all", c.Sites) {
c.Sites = []string{"all"}
return nil
}
@ -176,6 +178,7 @@ FIRST:
for _, site := range sites {
if s == site.Name {
keep = append(keep, s)
continue FIRST
}
}

View File

@ -26,6 +26,7 @@ func (u *InputUnifi) Initialize(l poller.Logger) error {
if u.Logger = l; u.Disable {
u.Logf("UniFi input plugin disabled or missing configuration!")
return nil
}
@ -41,6 +42,7 @@ func (u *InputUnifi) Initialize(l poller.Logger) error {
for i, c := range u.Controllers {
if err := u.getUnifi(u.setControllerDefaults(c)); err != nil {
u.LogErrorf("Controller %d of %d Auth or Connection Error, retrying: %v", i+1, len(u.Controllers), err)
continue
}
@ -61,6 +63,7 @@ func (u *InputUnifi) DebugInput() (bool, error) {
if u == nil || u.Config == nil {
return true, nil
}
if u.setDefaults(&u.Default); len(u.Controllers) == 0 && !u.Dynamic {
u.Controllers = []*Controller{&u.Default}
}
@ -71,27 +74,35 @@ func (u *InputUnifi) DebugInput() (bool, error) {
}
allOK := true
var allErrors error
for i, c := range u.Controllers {
if err := u.getUnifi(u.setControllerDefaults(c)); err != nil {
u.LogErrorf("Controller %d of %d Auth or Connection Error, retrying: %v", i+1, len(u.Controllers), err)
allOK = false
if allErrors != nil {
allErrors = fmt.Errorf("%v: %w", err, allErrors)
} else {
allErrors = err
}
continue
}
if err := u.checkSites(c); err != nil {
u.LogErrorf("checking sites on %s: %v", c.URL, err)
allOK = false
if allErrors != nil {
allErrors = fmt.Errorf("%v: %w", err, allErrors)
} else {
allErrors = err
}
continue
}

View File

@ -191,6 +191,7 @@ func (u *InputUnifi) Logf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "info"},
})
if u.Logger != nil {
u.Logger.Logf(msg, v...)
}
@ -203,6 +204,7 @@ func (u *InputUnifi) LogErrorf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "error"},
})
if u.Logger != nil {
u.Logger.LogErrorf(msg, v...)
}
@ -215,6 +217,7 @@ func (u *InputUnifi) LogDebugf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "debug"},
})
if u.Logger != nil {
u.Logger.LogDebugf(msg, v...)
}

View File

@ -14,6 +14,7 @@ func (l *Loki) Logf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "info"},
})
if l.Collect != nil {
l.Collect.Logf(msg, v...)
}
@ -26,6 +27,7 @@ func (l *Loki) LogErrorf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "error"},
})
if l.Collect != nil {
l.Collect.LogErrorf(msg, v...)
}
@ -38,6 +40,7 @@ func (l *Loki) LogDebugf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "debug"},
})
if l.Collect != nil {
l.Collect.LogDebugf(msg, v...)
}

View File

@ -67,12 +67,15 @@ func (l *Loki) Enabled() bool {
if l == nil {
return false
}
if l.Config == nil {
return false
}
if l.URL == "" {
return false
}
return !l.Disable
}
@ -80,12 +83,15 @@ func (l *Loki) DebugOutput() (bool, error) {
if l == nil {
return true, nil
}
if !l.Enabled() {
return true, nil
}
if err := l.ValidateConfig(); err != nil {
return false, err
}
return true, nil
}
@ -94,12 +100,15 @@ func (l *Loki) Run(collect poller.Collect) error {
l.Collect = collect
if !l.Enabled() {
l.LogDebugf("Loki config missing (or disabled), Loki output disabled!")
return nil
}
l.Logf("Loki enabled")
if err := l.ValidateConfig(); err != nil {
l.LogErrorf("invalid loki config")
return err
}
@ -126,6 +135,7 @@ func (l *Loki) ValidateConfig() error {
pass, err := os.ReadFile(strings.TrimPrefix(l.Password, "file://"))
if err != nil {
l.LogErrorf("Reading Loki Password File: %v", err)
return fmt.Errorf("error reading password file")
}
@ -135,6 +145,7 @@ func (l *Loki) ValidateConfig() error {
l.last = time.Now().Add(-l.Interval.Duration)
l.client = l.httpClient()
l.URL = strings.TrimRight(l.URL, "/") // gets a path appended to it later.
return nil
}
@ -142,6 +153,7 @@ func (l *Loki) ValidateConfig() error {
// This is started by Run().
func (l *Loki) PollController() {
interval := l.Interval.Round(time.Second)
l.Logf("Loki Event collection started, interval: %v, URL: %s", interval, l.URL)
ticker := time.NewTicker(interval)
@ -149,6 +161,7 @@ func (l *Loki) PollController() {
events, err := l.Collect.Events(&poller.Filter{Name: InputName})
if err != nil {
l.LogErrorf("event fetch for Loki failed: %v", err)
continue
}
@ -172,6 +185,7 @@ func (l *Loki) ProcessEvents(report *Report, events *poller.Events) error {
}
l.last = report.Start
l.Logf("Events sent to Loki. %v", report)
return nil

View File

@ -5,5 +5,6 @@ package main
func (p *plugin) runCollector() error {
p.Logf("mysql plugin is not finished")
return nil
}

View File

@ -60,12 +60,15 @@ func (p *plugin) Enabled() bool {
if p == nil {
return false
}
if p.Config == nil {
return false
}
if p.Collect == nil {
return false
}
return !p.Disable
}
@ -73,12 +76,15 @@ func (p *plugin) DebugOutput() (bool, error) {
if p == nil {
return true, nil
}
if !p.Enabled() {
return true, nil
}
if err := p.validateConfig(); err != nil {
return false, err
}
return true, nil
}

View File

@ -62,19 +62,25 @@ func (u *UnifiPoller) DebugIO() error {
defer outputSync.RUnlock()
allOK := true
var allErr error
u.Logf("Checking inputs...")
totalInputs := len(inputs)
for i, input := range inputs {
u.Logf("\t(%d/%d) Checking input %s...", i+1, totalInputs, input.Name)
ok, err := input.DebugInput()
if !ok {
u.LogErrorf("\t\t %s Failed: %v", input.Name, err)
allOK = false
} else {
u.Logf("\t\t %s is OK", input.Name)
}
if err != nil {
if allErr == nil {
allErr = err
@ -85,16 +91,21 @@ func (u *UnifiPoller) DebugIO() error {
}
u.Logf("Checking outputs...")
totalOutputs := len(outputs)
for i, output := range outputs {
u.Logf("\t(%d/%d) Checking output %s...", i+1, totalOutputs, output.Name)
ok, err := output.DebugOutput()
if !ok {
u.LogErrorf("\t\t %s Failed: %v", output.Name, err)
allOK = false
} else {
u.Logf("\t\t %s is OK", output.Name)
}
if err != nil {
if allErr == nil {
allErr = err
@ -103,8 +114,10 @@ func (u *UnifiPoller) DebugIO() error {
}
}
}
if !allOK {
u.LogErrorf("No all checks passed, please fix the logged issues.")
}
return allErr
}

View File

@ -70,27 +70,35 @@ func (u *UnifiPoller) InitializeInputs() error {
// parallelize startup
u.LogDebugf("initializing %d inputs", len(inputs))
for _, input := range inputs {
wg.Add(1)
go func(input *InputPlugin) {
defer wg.Done()
// This must return, or the app locks up here.
u.LogDebugf("inititalizing input... %s", input.Name)
if err := input.Initialize(u); err != nil {
u.LogDebugf("error initializing input ... %s", input.Name)
errChan <- err
return
}
u.LogDebugf("input successfully initialized ... %s", input.Name)
errChan <- nil
}(input)
}
wg.Wait()
close(errChan)
u.LogDebugf("collecting input errors...")
// collect errors if any.
errs := make([]error, 0)
for err := range errChan {
if err != nil {
errs = append(errs, err)
@ -104,6 +112,7 @@ func (u *UnifiPoller) InitializeInputs() error {
err = errors.Wrap(err, e.Error())
}
}
u.LogDebugf("returning error: %w", err)
return err
@ -114,39 +123,43 @@ type eventInputResult struct {
err error
}
// Events aggregates log messages (events) from one or more sources.
func (u *UnifiPoller) Events(filter *Filter) (*Events, error) {
inputSync.RLock()
defer inputSync.RUnlock()
func collectEvents(filter *Filter, inputs []*InputPlugin) (*Events, error) {
resultChan := make(chan eventInputResult, len(inputs))
wg := &sync.WaitGroup{}
for _, input := range inputs {
wg.Add(1)
go func(input *InputPlugin) {
defer wg.Done()
if filter != nil &&
filter.Name != "" &&
!strings.EqualFold(input.Name, filter.Name) {
resultChan <- eventInputResult{}
return
}
e, err := input.Events(filter)
if err != nil {
resultChan <- eventInputResult{err: err}
return
}
resultChan <- eventInputResult{logs: e.Logs}
resultChan <- eventInputResult{logs: e.Logs}
}(input)
}
wg.Wait()
close(resultChan)
events := Events{}
errs := make([]error, 0)
for result := range resultChan {
if result.err != nil {
errs = append(errs, result.err)
@ -155,6 +168,7 @@ func (u *UnifiPoller) Events(filter *Filter) (*Events, error) {
events.Logs = append(events.Logs, result.logs...)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("error initializing inputs")
@ -166,28 +180,34 @@ func (u *UnifiPoller) Events(filter *Filter) (*Events, error) {
return &events, err
}
// Events aggregates log messages (events) from one or more sources.
// It snapshots the package-level input list under a read lock and
// delegates the per-input fan-out to collectEvents.
func (u *UnifiPoller) Events(filter *Filter) (*Events, error) {
	inputSync.RLock()
	defer inputSync.RUnlock()

	return collectEvents(filter, inputs)
}
type metricInputResult struct {
metric *Metrics
err error
}
// Metrics aggregates all the measurements from filtered inputs and returns them.
// Passing a null filter returns everything!
func (u *UnifiPoller) Metrics(filter *Filter) (*Metrics, error) {
inputSync.RLock()
defer inputSync.RUnlock()
func collectMetrics(filter *Filter, inputs []*InputPlugin) (*Metrics, error) {
resultChan := make(chan metricInputResult, len(inputs))
wg := &sync.WaitGroup{}
for _, input := range inputs {
wg.Add(1)
go func(input *InputPlugin) {
defer wg.Done()
if filter != nil &&
filter.Name != "" &&
!strings.EqualFold(input.Name, filter.Name) {
resultChan <- metricInputResult{}
return
}
@ -195,11 +215,14 @@ func (u *UnifiPoller) Metrics(filter *Filter) (*Metrics, error) {
resultChan <- metricInputResult{metric: m, err: err}
}(input)
}
wg.Wait()
close(resultChan)
errs := make([]error, 0)
metrics := &Metrics{}
for result := range resultChan {
if result.err != nil {
errs = append(errs, result.err)
@ -219,6 +242,15 @@ func (u *UnifiPoller) Metrics(filter *Filter) (*Metrics, error) {
return metrics, err
}
// Metrics aggregates all the measurements from filtered inputs and returns them.
// Passing a null filter returns everything!
// It snapshots the package-level input list under a read lock and
// delegates the per-input fan-out to collectMetrics.
func (u *UnifiPoller) Metrics(filter *Filter) (*Metrics, error) {
	inputSync.RLock()
	defer inputSync.RUnlock()

	return collectMetrics(filter, inputs)
}
// AppendMetrics combines the metrics from two sources.
func AppendMetrics(existing *Metrics, m *Metrics) *Metrics {
if existing == nil {

View File

@ -80,20 +80,25 @@ func (u *UnifiPoller) InitializeOutputs() error {
}
func (u *UnifiPoller) runOutputMethods() (int, chan error) {
// Output plugin errors go into this channel.
err := make(chan error)
outputSync.RLock()
defer outputSync.RUnlock()
return runOutputMethods(outputs, u, u)
}
func runOutputMethods(outputs []*Output, l Logger, c Collect) (int, chan error) {
// Output plugin errors go into this channel.
err := make(chan error)
for _, o := range outputs {
if o != nil && o.Enabled() {
u.LogDebugf("output plugin enabled, starting run loop for %s", o.Name)
l.LogDebugf("output plugin enabled, starting run loop for %s", o.Name)
go func(o *Output) {
err <- o.Run(u) // Run each output plugin
err <- o.Run(c) // Run each output plugin
}(o)
} else {
u.LogDebugf("output plugin disabled for %s", o.Name)
l.LogDebugf("output plugin disabled for %s", o.Name)
}
}

View File

@ -26,6 +26,7 @@ func (u *UnifiPoller) Start() error {
if u.Flags.ShowVer {
fmt.Println(version.Print(AppName))
return nil // don't run anything else w/ version request.
}
@ -53,7 +54,9 @@ func (u *UnifiPoller) Start() error {
if err != nil {
os.Exit(1)
}
log.Fatal("Failed debug checks")
return err
}

108
pkg/poller/testutil.go Normal file
View File

@ -0,0 +1,108 @@
package poller
import (
"sync"
)
// TestCollector is an in-memory Collect implementation for unit tests.
// It fans registered input plugins into the package's shared collect
// helpers and forwards all log output to the configured Logger.
type TestCollector struct {
	sync.RWMutex // guards inputs and poller.

	// Logger receives all Logf/LogErrorf/LogDebugf output.
	Logger Logger
	// inputs holds the plugins registered via AddInput.
	inputs []*InputPlugin
	// poller is returned by Poller(); set via SetPoller.
	poller *Poller
}

// Compile-time check that TestCollector satisfies the Collect interface.
var _ Collect = &TestCollector{}

// NewTestCollector wraps a test logger (e.g. *testing.T) in a collector
// that is ready for AddInput.
func NewTestCollector(l testLogger) *TestCollector {
	return &TestCollector{
		Logger: NewTestLogger(l),
	}
}
// AddInput registers an input plugin with the collector so subsequent
// Metrics and Events calls include it.
func (t *TestCollector) AddInput(input *InputPlugin) {
	t.Lock()
	t.inputs = append(t.inputs, input)
	t.Unlock()
}
// Metrics gathers measurements from every registered input matching the
// filter, delegating to the package's shared collection helper while
// holding a read lock on the input list.
func (t *TestCollector) Metrics(filter *Filter) (*Metrics, error) {
	t.RLock()
	defer t.RUnlock()

	return collectMetrics(filter, t.inputs)
}

// Events gathers log events from every registered input matching the
// filter, delegating to the package's shared collection helper while
// holding a read lock on the input list.
func (t *TestCollector) Events(filter *Filter) (*Events, error) {
	t.RLock()
	defer t.RUnlock()

	return collectEvents(filter, t.inputs)
}
// SetPoller records the poller value that Poller() will return.
func (t *TestCollector) SetPoller(p *Poller) {
	t.Lock()
	t.poller = p
	t.Unlock()
}
// Poller returns a copy of the poller registered via SetPoller.
// The read lock matches SetPoller's write lock; the original read
// t.poller unsynchronized, which is a data race under `go test -race`
// when tests touch the collector concurrently.
// NOTE(review): this still panics if SetPoller was never called
// (t.poller is nil) — presumably intended fail-fast for tests.
func (t *TestCollector) Poller() Poller {
	t.RLock()
	defer t.RUnlock()

	return *t.poller
}
// Inputs returns the names of the input plugins registered with this
// collector.
//
// Bug fix: the original indexed the package-level `inputs` slice
// (`inputs[i].Name`) while ranging over t.inputs, returning the global
// poller's plugin names — or panicking with index-out-of-range when the
// test collector holds more inputs than the package slice.
func (t *TestCollector) Inputs() (names []string) {
	t.RLock()
	defer t.RUnlock()

	names = make([]string, 0, len(t.inputs))
	for i := range t.inputs {
		names = append(names, t.inputs[i].Name)
	}

	return names
}
// Outputs reports the registered output plugin names. The test
// collector never registers outputs, so this is always an empty
// (non-nil) slice.
func (t *TestCollector) Outputs() []string {
	return []string{}
}

// Logf forwards an info-level message to the configured Logger.
func (t *TestCollector) Logf(m string, v ...any) {
	t.Logger.Logf(m, v...)
}

// LogErrorf forwards an error-level message to the configured Logger.
func (t *TestCollector) LogErrorf(m string, v ...any) {
	t.Logger.LogErrorf(m, v...)
}

// LogDebugf forwards a debug-level message to the configured Logger.
func (t *TestCollector) LogDebugf(m string, v ...any) {
	t.Logger.LogDebugf(m, v...)
}
// testLogger is the small logging surface the test helpers need; both
// *testing.T and *testing.B satisfy it, so this package avoids a direct
// dependency on "testing" here.
type testLogger interface {
	Log(args ...any)
	Logf(format string, args ...any)
}

// TestLogger adapts a testLogger (e.g. *testing.T) to the poller Logger
// interface, prefixing each entry with its severity level.
type TestLogger struct {
	log testLogger // destination for all formatted entries.
}

// NewTestLogger returns a TestLogger that writes to l.
func NewTestLogger(l testLogger) *TestLogger {
	return &TestLogger{log: l}
}
// Logf writes an info-level entry to the wrapped test logger.
// (Unlike the production logger this is unconditional; there is no
// quiet flag here.)
func (t *TestLogger) Logf(m string, v ...any) {
	t.log.Logf("[INFO] "+m, v...)
}

// LogDebugf writes a debug-level entry to the wrapped test logger,
// unconditionally.
func (t *TestLogger) LogDebugf(m string, v ...any) {
	t.log.Logf("[DEBUG] "+m, v...)
}

// LogErrorf writes an error-level entry to the wrapped test logger.
func (t *TestLogger) LogErrorf(m string, v ...any) {
	t.log.Logf("[ERROR] "+m, v...)
}

View File

@ -78,6 +78,7 @@ func (u *promUnifi) exportClientDPI(r report, v any, appTotal, catTotal totalsDP
s, ok := v.(*unifi.DPITable)
if !ok {
u.LogErrorf("invalid type given to ClientsDPI: %T", v)
return
}
@ -166,6 +167,7 @@ func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.D
if _, ok := m[controller][site][name]; !ok {
m[controller][site][name] = dpi
return
}

View File

@ -121,12 +121,15 @@ func (u *promUnifi) DebugOutput() (bool, error) {
if u == nil {
return true, nil
}
if !u.Enabled() {
return true, nil
}
if u.HTTPListen == "" {
return false, fmt.Errorf("invalid listen string")
}
// check the port
parts := strings.Split(u.HTTPListen, ":")
if len(parts) != 2 {
@ -137,7 +140,9 @@ func (u *promUnifi) DebugOutput() (bool, error) {
if err != nil {
return false, err
}
_ = ln.Close()
return true, nil
}
@ -145,9 +150,11 @@ func (u *promUnifi) Enabled() bool {
if u == nil {
return false
}
if u.Config == nil {
return false
}
return !u.Disable
}
@ -157,8 +164,10 @@ func (u *promUnifi) Run(c poller.Collect) error {
u.Collector = c
if u.Config == nil || !u.Enabled() {
u.LogDebugf("Prometheus config missing (or disabled), Prometheus HTTP listener disabled!")
return nil
}
u.Logf("Prometheus is enabled")
u.Namespace = strings.Trim(strings.ReplaceAll(u.Namespace, "-", "_"), "_")
@ -200,9 +209,11 @@ func (u *promUnifi) Run(c poller.Collect) error {
switch u.SSLKeyPath == "" && u.SSLCrtPath == "" {
case true:
u.Logf("Prometheus exported at http://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
return http.ListenAndServe(u.HTTPListen, mux)
default:
u.Logf("Prometheus exported at https://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
return http.ListenAndServeTLS(u.HTTPListen, u.SSLCrtPath, u.SSLKeyPath, mux)
}
}
@ -249,7 +260,7 @@ func (u *promUnifi) ScrapeHandler(w http.ResponseWriter, r *http.Request) {
).ServeHTTP(w, r)
}
func (u *promUnifi) DefaultHandler(w http.ResponseWriter, r *http.Request) {
func (u *promUnifi) DefaultHandler(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(poller.AppName + "\n"))
}

View File

@ -14,6 +14,7 @@ func (u *promUnifi) Logf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "info"},
})
if u.Collector != nil {
u.Collector.Logf(msg, v...)
}
@ -26,6 +27,7 @@ func (u *promUnifi) LogErrorf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "error"},
})
if u.Collector != nil {
u.Collector.LogErrorf(msg, v...)
}
@ -38,6 +40,7 @@ func (u *promUnifi) LogDebugf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "debug"},
})
if u.Collector != nil {
u.Collector.LogDebugf(msg, v...)
}

View File

@ -143,9 +143,11 @@ func (u *promUnifi) exportPDU(r report, d *unifi.PDU) {
if d.OutletACPowerConsumption.Txt != "" {
r.send([]*metric{{u.Device.OutletACPowerConsumption, gauge, d.OutletACPowerConsumption, labels}})
}
if d.PowerSource.Txt != "" {
r.send([]*metric{{u.Device.PowerSource, gauge, d.PowerSource, labels}})
}
if d.TotalMaxPower.Txt != "" {
r.send([]*metric{{u.Device.TotalMaxPower, gauge, d.TotalMaxPower, labels}})
}
@ -241,7 +243,6 @@ func (u *promUnifi) exportPDUPrtTable(r report, labels []string, pt []unifi.Port
func (u *promUnifi) exportPDUOutletTable(r report, labels []string, ot []unifi.OutletTable, oto []unifi.OutletOverride) {
// Per-outlet data on a switch
for _, o := range ot {
// Copy labels, and add four new ones.
labelOutlet := []string{
labels[2] + " Outlet " + o.Index.Txt, o.Index.Txt,
@ -261,7 +262,6 @@ func (u *promUnifi) exportPDUOutletTable(r report, labels []string, ot []unifi.O
// Per-outlet data on a switch
for _, o := range oto {
// Copy labels, and add four new ones.
labelOutlet := []string{
labels[2] + " Outlet Override " + o.Index.Txt, o.Index.Txt,

View File

@ -79,6 +79,7 @@ func (u *promUnifi) exportSiteDPI(r report, v any) {
s, ok := v.(*unifi.DPITable)
if !ok {
u.LogErrorf("invalid type given to SiteDPI: %T", v)
return
}

View File

@ -1,7 +1,6 @@
package promunifi
import (
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
@ -87,7 +86,7 @@ func (u *promUnifi) exportUSG(r report, d *unifi.USG) {
}
for k, v := range d.SystemStats.Temps {
temp, _ := strconv.ParseInt(strings.Split(v, " ")[0], 10, 64)
temp := v.CelsiusInt64()
k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
if k = strings.ToLower(k); temp != 0 && k != "" {

View File

@ -9,11 +9,13 @@ func (u *promUnifi) exportUXG(r report, d *unifi.UXG) {
if !d.Adopted.Val || d.Locating.Val {
return
}
var gw *unifi.Gw = nil
var gw *unifi.Gw
if d.Stat != nil {
gw = d.Stat.Gw
}
var sw *unifi.Sw = nil
var sw *unifi.Sw
if d.Stat != nil {
sw = d.Stat.Sw
}

66
pkg/testutil/dep.go Normal file
View File

@ -0,0 +1,66 @@
package testutil
import (
"testing"
"github.com/unpoller/unifi/mocks"
"github.com/unpoller/unpoller/pkg/inputunifi"
"github.com/unpoller/unpoller/pkg/poller"
)
// TestRig bundles everything needed to test the unifi input end to end:
// a mock UniFi HTTP server, an in-memory collector, and the input
// plugin configured to poll that server.
type TestRig struct {
	MockServer *mocks.MockHTTPTestServer // fake UniFi controller API.
	Collector  *poller.TestCollector     // in-memory poller.Collect.
	InputUnifi *inputunifi.InputUnifi    // the input plugin under test.
	Controller *inputunifi.Controller    // controller config pointing at MockServer.
}
// NewTestSetup starts a mock UniFi controller and builds an InputUnifi
// plugin pointed at it, with every optional save flag enabled so all
// collection paths are exercised. The plugin is registered with the
// rig's TestCollector under the name "unifi". Callers should defer
// rig.Close() to shut the mock server down.
func NewTestSetup(t *testing.T) *TestRig {
	srv := mocks.NewMockHTTPTestServer()
	testCollector := poller.NewTestCollector(t)
	enabled := true

	// Turn on every save option so metrics/events cover all data types.
	controller := inputunifi.Controller{
		SaveAnomal: &enabled,
		SaveAlarms: &enabled,
		SaveEvents: &enabled,
		SaveIDS:    &enabled,
		SaveDPI:    &enabled,
		SaveRogue:  &enabled,
		SaveSites:  &enabled,
		URL:        srv.Server.URL,
	}

	// The same controller serves as both the default and the only entry.
	in := &inputunifi.InputUnifi{
		Logger: testCollector.Logger,
		Config: &inputunifi.Config{
			Disable:     false,
			Default:     controller,
			Controllers: []*inputunifi.Controller{&controller},
		},
	}

	testCollector.AddInput(&poller.InputPlugin{
		Name:  "unifi",
		Input: in,
	})

	return &TestRig{
		MockServer: srv,
		Collector:  testCollector,
		InputUnifi: in,
		Controller: &controller,
	}
}
// Initialize initializes the input plugin against the mock server and
// performs one Metrics and one Events poll. Errors are deliberately
// discarded — this is best-effort priming; tests that care about the
// results should call these methods directly and check the errors.
func (t *TestRig) Initialize() {
	_ = t.InputUnifi.Initialize(t.Collector.Logger)
	_, _ = t.InputUnifi.Metrics(nil)
	_, _ = t.InputUnifi.Events(nil)
}
// Close shuts down the mock UniFi HTTP server; call it (usually via
// defer) when the test is done with the rig.
func (t *TestRig) Close() {
	t.MockServer.Server.Close()
}
// PBool returns a pointer to a copy of v. Handy for populating optional
// *bool configuration fields in tests.
func PBool(v bool) *bool {
	b := v

	return &b
}

62
pkg/testutil/sets.go Normal file
View File

@ -0,0 +1,62 @@
package testutil
// Set is a generic collection of unique comparable values.
type Set[K comparable] struct {
	// entities uses struct{} values: zero-width, so the map stores only
	// keys (the original boxed `true` into `any` per entry).
	entities map[K]struct{}
}

// NewSetFromMap builds a Set from the keys of m; the values are ignored
// (even a false value puts its key in the set).
func NewSetFromMap[K comparable, V any](m map[K]V) *Set[K] {
	entities := make(map[K]struct{}, len(m)) // pre-sized: final size is known.
	for k := range m {
		entities[k] = struct{}{}
	}

	return &Set[K]{
		entities: entities,
	}
}

// NewSetFromSlice builds a Set from the elements of s, collapsing
// duplicates.
func NewSetFromSlice[K comparable](s []K) *Set[K] {
	entities := make(map[K]struct{}, len(s)) // upper bound; duplicates shrink it.
	for _, k := range s {
		entities[k] = struct{}{}
	}

	return &Set[K]{
		entities: entities,
	}
}

// Difference compares two sets: additions holds elements present in s
// but missing from other; deletions holds elements present in other but
// missing from s. Both slices are always non-nil, in unspecified order
// (map iteration order is random).
func (s *Set[K]) Difference(other *Set[K]) (additions []K, deletions []K) {
	additions = make([]K, 0)
	for i := range s.entities {
		if _, ok := other.entities[i]; !ok {
			additions = append(additions, i)
		}
	}

	deletions = make([]K, 0)
	for j := range other.entities {
		if _, ok := s.entities[j]; !ok {
			deletions = append(deletions, j)
		}
	}

	return additions, deletions
}

// Len reports the number of unique elements in the set.
func (s *Set[K]) Len() int {
	return len(s.entities)
}

// Slice returns the set's elements as a new slice in unspecified order.
func (s *Set[K]) Slice() []K {
	ret := make([]K, 0, len(s.entities)) // pre-sized: one element per entry.
	for k := range s.entities {
		ret = append(ret, k)
	}

	return ret
}

39
pkg/testutil/sets_test.go Normal file
View File

@ -0,0 +1,39 @@
package testutil_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/unpoller/unpoller/pkg/testutil"
)
// TestSets exercises the Set helpers: construction from a slice (with
// duplicates collapsed), construction from a map's keys, and the
// two-way Difference.
func TestSets(t *testing.T) {
	// "c" appears twice in the input; the set keeps a single copy.
	s1 := testutil.NewSetFromSlice[string]([]string{"a", "b", "c", "c"})
	assert.Len(t, s1.Slice(), 3)
	assert.Contains(t, s1.Slice(), "a")
	assert.Contains(t, s1.Slice(), "b")
	assert.Contains(t, s1.Slice(), "c")

	// Map construction uses keys only — "d" is included despite its
	// false value.
	s2 := testutil.NewSetFromMap[string](map[string]bool{
		"c": true,
		"d": false,
		"e": true,
	})
	assert.Len(t, s2.Slice(), 3)
	assert.Contains(t, s2.Slice(), "c")
	assert.Contains(t, s2.Slice(), "d")
	assert.Contains(t, s2.Slice(), "e")

	// s1 vs s2: "a","b" only in s1 (additions); "d","e" only in s2
	// (deletions); the shared "c" appears in neither.
	additions, deletions := s1.Difference(s2)
	assert.Len(t, additions, 2)
	assert.Len(t, deletions, 2)
	assert.Contains(t, additions, "a")
	assert.Contains(t, additions, "b")
	assert.Contains(t, deletions, "d")
	assert.Contains(t, deletions, "e")
}

View File

@ -17,7 +17,7 @@ func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
}
// Arbitrary /health handler.
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleHealth(w http.ResponseWriter, _ *http.Request) {
s.handleDone(w, []byte("OK"), mimeHTML)
}
@ -64,6 +64,7 @@ func (s *Server) handleOutput(w http.ResponseWriter, r *http.Request) {
c := s.plugins.getOutput(vars["output"])
if c == nil {
s.handleMissing(w, r)
return
}
@ -100,6 +101,7 @@ func (s *Server) handleInput(w http.ResponseWriter, r *http.Request) { //nolint:
c := s.plugins.getInput(vars["input"])
if c == nil {
s.handleMissing(w, r)
return
}

View File

@ -12,6 +12,7 @@ func (s *Server) Logf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "info"},
})
if s.Collect != nil {
s.Collect.Logf(msg, v...)
}
@ -24,6 +25,7 @@ func (s *Server) LogErrorf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "error"},
})
if s.Collect != nil {
s.Collect.LogErrorf(msg, v...)
}
@ -36,6 +38,7 @@ func (s *Server) LogDebugf(msg string, v ...any) {
Msg: fmt.Sprintf(msg, v...),
Tags: map[string]string{"type": "debug"},
})
if s.Collect != nil {
s.Collect.LogDebugf(msg, v...)
}

View File

@ -107,6 +107,7 @@ func (w *webPlugins) updateInput(config *Input) {
input := w.getInput(config.Name)
if input == nil {
w.newInput(config)
return
}
@ -142,6 +143,7 @@ func (w *webPlugins) updateOutput(config *Output) {
output := w.getOutput(config.Name)
if output == nil {
w.newOutput(config)
return
}

View File

@ -71,9 +71,11 @@ func (s *Server) Enabled() bool {
if s == nil {
return false
}
if s.Config == nil {
return false
}
return s.Enable
}
@ -82,8 +84,10 @@ func (s *Server) Run(c poller.Collect) error {
s.Collect = c
if s.Config == nil || s.Port == 0 || s.HTMLPath == "" || !s.Enabled() {
s.LogDebugf("Internal web server disabled!")
return nil
}
s.Logf("Internal web server enabled")
if _, err := os.Stat(s.HTMLPath); err != nil {
@ -99,9 +103,11 @@ func (s *Server) DebugOutput() (bool, error) {
if s == nil {
return true, nil
}
if !s.Enabled() {
return true, nil
}
if s.HTMLPath == "" {
return true, nil
}
@ -116,6 +122,7 @@ func (s *Server) DebugOutput() (bool, error) {
if err != nil {
return false, err
}
_ = ln.Close()
return true, nil

View File

@ -24,6 +24,7 @@ func (s *Server) basicAuth(handler http.HandlerFunc) http.HandlerFunc {
return s.handleLog(func(w http.ResponseWriter, r *http.Request) {
if s.Accounts.PasswordIsCorrect(r.BasicAuth()) {
handler(w, r)
return
}
@ -41,6 +42,7 @@ func (s *Server) handleLog(handler http.HandlerFunc) http.HandlerFunc {
// Use custom ResponseWriter to catch and log response data.
response := &ResponseWriter{Writer: w, Start: time.Now()}
handler(response, r) // Run provided handler with custom ResponseWriter.
user, _, _ := r.BasicAuth()
@ -67,7 +69,7 @@ func (s *Server) handleLog(handler http.HandlerFunc) http.HandlerFunc {
}
// handleMissing returns a blank 404.
func (s *Server) handleMissing(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleMissing(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", mimeHTML)
w.WriteHeader(http.StatusNotFound)
_, _ = w.Write([]byte("404 page not found\n"))
@ -93,6 +95,7 @@ func (s *Server) handleJSON(w http.ResponseWriter, data any) {
b, err := json.Marshal(data)
if err != nil {
s.handleError(w, err)
return
}