diff --git a/README.md b/README.md
index 9415fcb6..b2ad05b8 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,8 @@ documentation support. This project succeeds because of them. Thank you!
## Copyright & License
+
-- Copyright © 2018-2020 David Newhall II.
-- See [LICENSE](LICENSE) for license information.
+- Copyright © 2018-2020 David Newhall II.
+- See [LICENSE](LICENSE) for license information.
diff --git a/go.mod b/go.mod
index 93b76796..133cb4bf 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,33 @@
module github.com/unpoller/unpoller
-go 1.16
+go 1.19
require (
- github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224
- github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c
- github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b
- github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a
- github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e
- github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99
+ github.com/DataDog/datadog-go v4.0.0+incompatible
+ github.com/gorilla/mux v1.8.0
+ github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
+ github.com/prometheus/client_golang v1.11.0
+ github.com/prometheus/common v0.29.0
+ github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c
+ golang.org/x/crypto v0.3.0
+ golang.org/x/term v0.2.0
+ golift.io/cnfg v0.1.1
+ golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5
+ golift.io/version v0.0.2
+)
+
+require golang.org/x/net v0.2.0 // indirect
+
+require (
+ github.com/BurntSushi/toml v1.0.0 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.1 // indirect
+ github.com/golang/protobuf v1.4.3 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/procfs v0.6.0 // indirect
+ github.com/unpoller/unifi v0.1.0
+ golang.org/x/sys v0.2.0 // indirect
+ google.golang.org/protobuf v1.26.0-rc.1 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
diff --git a/go.sum b/go.sum
index c8a96998..696bd304 100644
--- a/go.sum
+++ b/go.sum
@@ -31,92 +31,48 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v4.0.0+incompatible h1:Dq8Dr+4sV1gBO1sHDWdW+4G+PdsA+YSJOK925MxrrCY=
github.com/DataDog/datadog-go v4.0.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -140,11 +96,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -168,60 +121,23 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -231,177 +147,70 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
-github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c h1:zqmyTlQyufRC65JnImJ6H1Sf7BDj8bG31EV919NVEQc=
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224 h1:avnWIPsXSuOIT1x2oImsbOUpLvC0ACQxsPmhYm5P8/E=
-github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224/go.mod h1:/E0LxkzsPngrP+hevAaXOjVCOr8JWRosGbrvlV6reIk=
-github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c h1:T+T+jWgL3+4Bgy3VuTLNJLoShvmrfPuH7DxaYeB0gho=
-github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c/go.mod h1:GHqTS6Ry8fcVDPoPuIhI6e7HPVH6tSOZIJsQ5h2zmJo=
-github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b h1:dHFTRAxwm064wPA4SOijcMfOqayrywn5foKqz7iU2BQ=
-github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b/go.mod h1:Q8i0Sgji6D7zgRB11YoDmrmF17EEp+DtAH151IQaMKg=
-github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a h1:uYVTxnhNssppXV1R92GbQ5RxTYbjfB94xmwobcw/FbQ=
-github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a/go.mod h1:TVrEHyRoUWJczzYFuP6tCS3mR1gAA7+LV/YILVGEoLs=
-github.com/unpoller/poller v0.0.0-20210623101401-f12841d79a28 h1:YAv5naMdpOFahnxteFFRidZlrSEwLv8V2nBKJKmLmHg=
-github.com/unpoller/poller v0.0.0-20210623101401-f12841d79a28/go.mod h1:AbDp60t5WlLSRELAliMJ0RFQpm/0yXpyolVSZqNtero=
-github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e h1:tNBIBCmtc7whuhkjKyEzpU3OHzYHyGCBy/LERhHxh3A=
-github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e/go.mod h1:AbDp60t5WlLSRELAliMJ0RFQpm/0yXpyolVSZqNtero=
-github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99 h1:6x0qUKxU/A5UOUSoUGLbDuaGrXlRkOvdiWDGLnNC8BA=
-github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99/go.mod h1:xZQ+DIFUlI6XJqLHLEXxujWQwSzbESNtHtC0+njvOGA=
-github.com/unpoller/unifi v0.0.0-20210914213836-fd3c38c905a3/go.mod h1:K9QFFGfZws4gzB+Popix19S/rBKqrtqI+tyPORyg3F0=
-github.com/unpoller/unifi v0.0.0-20221124010147-8d83427af67b/go.mod h1:pJGPtjikPcYO+rZMpgYOj6Zs044Dl4R+u3MsV3TMenk=
-github.com/unpoller/unifi v0.0.9-0.20210623100314-3dccfdbc4c80 h1:XjHGfJhMwnB63DYHgtWGJgDxLhxVcAOtf+cfuvpGoyo=
-github.com/unpoller/unifi v0.0.9-0.20210623100314-3dccfdbc4c80/go.mod h1:K9QFFGfZws4gzB+Popix19S/rBKqrtqI+tyPORyg3F0=
-github.com/unpoller/webserver v0.0.0-20210623101543-90d89bb0acdf h1:HhXi3qca3kyFEFPh0mqdr0bpQs94hJvMbUJztwPtf2A=
-github.com/unpoller/webserver v0.0.0-20210623101543-90d89bb0acdf/go.mod h1:77PywuUvspdtoRuH1htFhR3Tp0pLyWj6kJlYR4tBYho=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/unpoller/unifi v0.1.0 h1:aN3DrL3C+lkAN9/yOvrv+2fiSm/IXswdDVi8v42P4vY=
+github.com/unpoller/unifi v0.1.0/go.mod h1:iZA8XU8CkuKHKcmK8me2zWBceAxnlpd6pEQGOvIPRY8=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -434,13 +243,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -451,7 +255,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -466,11 +269,9 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -486,15 +287,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -504,10 +299,8 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -527,34 +320,28 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -564,8 +351,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -573,7 +358,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -597,11 +381,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golift.io/cnfg v0.0.7 h1:qkNpP5Bq+5Gtoc6HcI8kapMD5zFOVan6qguxqBQF3OY=
-golift.io/cnfg v0.0.7/go.mod h1:AsB0DJe7nv0bizKaoy3e3MjjOF7upTpMOMvsfv4CNNk=
+golift.io/cnfg v0.1.1 h1:8T20+WhEgfThCu5D/Tf5CCFGORSCrTB8cgR29sXTpUE=
+golift.io/cnfg v0.1.1/go.mod h1:cjgsYXSEgyWJEbSk+QehZuGN26jw+1CzwceGCbJ0Lck=
+golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5 h1:W9EKFeDCvnMCXrXZ/z33fmDZZmUQYJPkUqlQ95Sbg+o=
+golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5/go.mod h1:oXt/WBhSizIK6GmS2ka85IQ4TLmL2BFh4jHKR0bbThI=
golift.io/version v0.0.2 h1:i0gXRuSDHKs4O0sVDUg4+vNIuOxYoXhaxspftu2FRTE=
golift.io/version v0.0.2/go.mod h1:76aHNz8/Pm7CbuxIsDi97jABL5Zui3f2uZxDm4vB6hU=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -619,7 +404,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@@ -630,7 +414,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -655,15 +438,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -682,33 +460,22 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -719,5 +486,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/main.go b/main.go
index f8e0b81f..25916ad6 100644
--- a/main.go
+++ b/main.go
@@ -6,14 +6,14 @@ import (
"os"
"time"
- "github.com/unpoller/poller"
+ "github.com/unpoller/unpoller/pkg/poller"
// Load input plugins!
- _ "github.com/unpoller/inputunifi"
+ _ "github.com/unpoller/unpoller/pkg/inputunifi"
// Load output plugins!
- _ "github.com/unpoller/datadogunifi"
- _ "github.com/unpoller/influxunifi"
- _ "github.com/unpoller/lokiunifi"
- _ "github.com/unpoller/promunifi"
+ _ "github.com/unpoller/unpoller/pkg/datadogunifi"
+ _ "github.com/unpoller/unpoller/pkg/influxunifi"
+ _ "github.com/unpoller/unpoller/pkg/lokiunifi"
+ _ "github.com/unpoller/unpoller/pkg/promunifi"
)
// Keep it simple.
diff --git a/pkg/datadogunifi/LICENSE b/pkg/datadogunifi/LICENSE
new file mode 100644
index 00000000..f452416f
--- /dev/null
+++ b/pkg/datadogunifi/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Cody Lee
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/pkg/datadogunifi/README.md b/pkg/datadogunifi/README.md
new file mode 100644
index 00000000..a3755288
--- /dev/null
+++ b/pkg/datadogunifi/README.md
@@ -0,0 +1,83 @@
+# datadogunifi
+
+UniFi Poller Output Plugin for DataDog
+
+## Configuration
+
+```yaml
+datadog:
+ # How often to poll UniFi and report to Datadog.
+ interval: "2m"
+
+ # To disable this output plugin
+ disable: false
+
+ # Datadog Custom Options
+
+ # address to talk to the datadog agent, by default this uses the local statsd UDP interface
+ # address: "..."
+
+ # namespace to prepend to all data
+ # namespace: ""
+
+ # tags to append to all data
+ # tags:
+ # - foo
+
+ # max_bytes_per_payload is the maximum number of bytes a single payload will contain.
+ # The magic value 0 will set the option to the optimal size for the transport
+ # protocol used when creating the client: 1432 for UDP and 8192 for UDS.
+ # max_bytes_per_payload: 0
+
+ # max_messages_per_payload is the maximum number of metrics, events and/or service checks a single payload will contain.
+ # This option can be set to `1` to create an unbuffered client.
+ # max_messages_per_payload: 0
+
+ # BufferPoolSize is the size of the pool of buffers in number of buffers.
+ # The magic value 0 will set the option to the optimal size for the transport
+ # protocol used when creating the client: 2048 for UDP and 512 for UDS.
+ # buffer_pool_size: 0
+
+ # buffer_flush_interval is the interval after which the current buffer will get flushed.
+ # buffer_flush_interval: 0
+
+ # buffer_shard_count is the number of buffer "shards" that will be used.
+ # Those shards allow the use of multiple buffers at the same time to reduce
+ # lock contention.
+ # buffer_shard_count: 0
+
+ # sender_queue_size is the size of the sender queue in number of buffers.
+ # The magic value 0 will set the option to the optimal size for the transport
+ # protocol used when creating the client: 2048 for UDP and 512 for UDS.
+ # sender_queue_size: 0
+
+ # write_timeout_uds is the timeout after which a UDS packet is dropped.
+ # write_timeout_uds: 5000
+
+ # receive_mode determines the behavior of the client when receiving too many
+ # metrics. The client will either drop the metrics if its buffers are
+ # full (ChannelMode mode) or block the caller until the metric can be
+ # handled (MutexMode mode). By default the client will use MutexMode. This
+ # option should be set to ChannelMode only when used under very high
+ # load.
+ #
+ # MutexMode uses a mutex internally which is much faster than
+ # channel but causes some lock contention when used with a high number
+ # of threads. Mutexes are sharded based on the metric name, which
+ # limits mutex contention when goroutines send different metrics.
+ #
+ # ChannelMode: uses channel (of ChannelModeBufferSize size) to send
+ # metrics and drop metrics if the channel is full. Sending metrics in
+ # this mode is slower than MutexMode (because of the channel), but
+ # will not block the application. This mode is made for applications
+ # using many goroutines, sending the same metrics at a very high
+ # volume. The goal is to not slow down the application at the cost of
+ # dropping metrics and having a lower max throughput.
+ # receive_mode: 0
+
+ # channel_mode_buffer_size is the size of the channel holding incoming metrics
+ # channel_mode_buffer_size: 0
+
+ # aggregation_flush_interval is the interval for the aggregator to flush metrics
+ # aggregation_flush_interval: 0
+```
\ No newline at end of file
diff --git a/pkg/datadogunifi/alarms.go b/pkg/datadogunifi/alarms.go
new file mode 100644
index 00000000..745c5a03
--- /dev/null
+++ b/pkg/datadogunifi/alarms.go
@@ -0,0 +1,88 @@
+package datadogunifi
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/unpoller/unifi"
+)
+
+// Counter names used by report.addCount when tallying processed events.
+const (
+	alarmT   = item("Alarm")
+	anomalyT = item("Anomaly")
+)
+
+// batchAlarms generates alarm events and logs for Datadog.
+// Alarms older than one collection interval (plus a second of slack) are
+// skipped so events already reported in a previous cycle are not duplicated.
+func (u *DatadogUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dupl
+	if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
+		return // The event is older than our interval, ignore it.
+	}
+
+	// Tag names largely mirror batchIDS in events.go.
+	// NOTE(review): this map uses "dst_port" where batchIDS uses "dest_port"
+	// for the same source field — confirm whether that difference is intended.
+	tagMap := map[string]string{
+		"dst_port":              strconv.Itoa(event.DestPort),
+		"src_port":              strconv.Itoa(event.SrcPort),
+		"dest_ip":               event.DestIP,
+		"dst_mac":               event.DstMAC,
+		"host":                  event.Host,
+		"msg":                   event.Msg,
+		"src_ip":                event.SrcIP,
+		"src_mac":               event.SrcMAC,
+		"dst_ip_asn":            fmt.Sprintf("%d", event.DestIPGeo.Asn),
+		"dst_ip_latitude":       fmt.Sprintf("%0.6f", event.DestIPGeo.Latitude),
+		"dst_ip_longitude":      fmt.Sprintf("%0.6f", event.DestIPGeo.Longitude),
+		"dst_ip_city":           event.DestIPGeo.City,
+		"dst_ip_continent_code": event.DestIPGeo.ContinentCode,
+		"dst_ip_country_code":   event.DestIPGeo.CountryCode,
+		"dst_ip_country_name":   event.DestIPGeo.CountryName,
+		"dst_ip_organization":   event.DestIPGeo.Organization,
+		"src_ip_asn":            fmt.Sprintf("%d", event.SourceIPGeo.Asn),
+		"src_ip_latitude":       fmt.Sprintf("%0.6f", event.SourceIPGeo.Latitude),
+		"src_ip_longitude":      fmt.Sprintf("%0.6f", event.SourceIPGeo.Longitude),
+		"src_ip_city":           event.SourceIPGeo.City,
+		"src_ip_continent_code": event.SourceIPGeo.ContinentCode,
+		"src_ip_country_code":   event.SourceIPGeo.CountryCode,
+		"src_ip_country_name":   event.SourceIPGeo.CountryName,
+		"src_ip_organization":   event.SourceIPGeo.Organization,
+		"site_name":             event.SiteName,
+		"source":                event.SourceName,
+		"in_iface":              event.InIface,
+		"event_type":            event.EventType,
+		"subsystem":             event.Subsystem,
+		"archived":              event.Archived.Txt,
+		"usg_ip":                event.USGIP,
+		"proto":                 event.Proto,
+		"key":                   event.Key,
+		"catname":               event.Catname,
+		"app_proto":             event.AppProto,
+		"action":                event.InnerAlertAction,
+	}
+	r.addCount(alarmT)
+
+	// Empty tags are dropped before converting to the statsd tag format.
+	tagMap = cleanTags(tagMap)
+	tags := tagMapToTags(tagMap)
+	title := fmt.Sprintf("[%s][%s] Alarm at %s from %s", event.EventType, event.Catname, event.SiteName, event.SourceName)
+	_ = r.reportEvent(title, event.Datetime, event.Msg, tags)
+	r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Msg, tagMapToSimpleStrings(tagMap)))
+}
+
+// batchAnomaly generates Anomalies from UniFi for Datadog.
+// Anomalies older than one collection interval (plus a second of slack)
+// are skipped to avoid re-reporting events from a previous cycle.
+func (u *DatadogUnifi) batchAnomaly(r report, event *unifi.Anomaly) {
+	cutoff := u.Interval.Duration + time.Second
+	if time.Since(event.Datetime) > cutoff {
+		return // Too old; already covered by an earlier poll.
+	}
+
+	r.addCount(anomalyT)
+
+	tagMap := map[string]string{
+		"application": "unifi_anomaly",
+		"source":      event.SourceName,
+		"site_name":   event.SiteName,
+		"device_mac":  event.DeviceMAC,
+	}
+	tagMap = cleanTags(tagMap)
+
+	title := fmt.Sprintf("Anomaly detected at %s from %s", event.SiteName, event.SourceName)
+	_ = r.reportEvent(title, event.Datetime, event.Anomaly, tagMapToTags(tagMap))
+	r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Anomaly, tagMapToSimpleStrings(tagMap)))
+}
diff --git a/pkg/datadogunifi/clients.go b/pkg/datadogunifi/clients.go
new file mode 100644
index 00000000..8c2ef504
--- /dev/null
+++ b/pkg/datadogunifi/clients.go
@@ -0,0 +1,189 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// batchClient generates Unifi Client datapoints for Datadog.
+// These points can be passed directly to Datadog.
+// Boolean and flag fields arrive as flexible types with .Txt/.Val views;
+// .Txt feeds the tags and .Val feeds the numeric gauges.
+func (u *DatadogUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
+	tags := map[string]string{
+		"mac":          s.Mac,
+		"site_name":    s.SiteName,
+		"source":       s.SourceName,
+		"ap_name":      s.ApName,
+		"gw_name":      s.GwName,
+		"sw_name":      s.SwName,
+		"oui":          s.Oui,
+		"radio_name":   s.RadioName,
+		"radio":        s.Radio,
+		"radio_proto":  s.RadioProto,
+		"name":         s.Name,
+		"fixed_ip":     s.FixedIP,
+		"sw_port":      s.SwPort.Txt,
+		"os_class":     s.OsClass.Txt,
+		"os_name":      s.OsName.Txt,
+		"dev_cat":      s.DevCat.Txt,
+		"dev_id":       s.DevID.Txt,
+		"dev_vendor":   s.DevVendor.Txt,
+		"dev_family":   s.DevFamily.Txt,
+		"is_wired":     s.IsWired.Txt,
+		"is_guest":     s.IsGuest.Txt,
+		"use_fixed_ip": s.UseFixedIP.Txt,
+		"channel":      s.Channel.Txt,
+		"vlan":         s.Vlan.Txt,
+		// NOTE(review): "hostname" duplicates "name" (both s.Name) —
+		// confirm whether this was meant to be a separate hostname field.
+		"hostname": s.Name,
+		"essid":    s.Essid,
+		"bssid":    s.Bssid,
+		"ip":       s.IP,
+	}
+	// Gauges only take floats; encode the power-save flag as 0/1.
+	powerSaveEnabled := 0.0
+	if s.PowersaveEnabled.Val {
+		powerSaveEnabled = 1.0
+	}
+	data := map[string]float64{
+		"anomalies":         float64(s.Anomalies),
+		"channel":           s.Channel.Val,
+		"satisfaction":      s.Satisfaction.Val,
+		"bytes_r":           float64(s.BytesR),
+		"ccq":               float64(s.Ccq),
+		"noise":             float64(s.Noise),
+		"powersave_enabled": powerSaveEnabled,
+		"roam_count":        float64(s.RoamCount),
+		"rssi":              float64(s.Rssi),
+		"rx_bytes":          float64(s.RxBytes),
+		"rx_bytes_r":        float64(s.RxBytesR),
+		"rx_packets":        float64(s.RxPackets),
+		"rx_rate":           float64(s.RxRate),
+		"signal":            float64(s.Signal),
+		"tx_bytes":          float64(s.TxBytes),
+		"tx_bytes_r":        float64(s.TxBytesR),
+		"tx_packets":        float64(s.TxPackets),
+		"tx_retries":        float64(s.TxRetries),
+		"tx_power":          float64(s.TxPower),
+		"tx_rate":           float64(s.TxRate),
+		"uptime":            float64(s.Uptime),
+		"wifi_tx_attempts":  float64(s.WifiTxAttempts),
+		"wired_rx_bytes":    float64(s.WiredRxBytes),
+		"wired_rx_bytes-r":  float64(s.WiredRxBytesR),
+		"wired_rx_packets":  float64(s.WiredRxPackets),
+		"wired_tx_bytes":    float64(s.WiredTxBytes),
+		"wired_tx_bytes-r":  float64(s.WiredTxBytesR),
+		"wired_tx_packets":  float64(s.WiredTxPackets),
+	}
+
+	metricName := metricNamespace("clients")
+
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+}
+
+// totalsDPImap: controller, site, name (app/cat name), dpi.
+type totalsDPImap map[string]map[string]map[string]unifi.DPIData
+
+// batchClientDPI reports per-client deep-packet-inspection counters and
+// accumulates per-application and per-category totals into the provided maps.
+// v is interface{} because the poller hands back untyped metrics; anything
+// other than *unifi.DPITable is logged and skipped.
+func (u *DatadogUnifi) batchClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
+	s, ok := v.(*unifi.DPITable)
+	if !ok {
+		u.LogErrorf("invalid type given to batchClientDPI: %T", v)
+		return
+	}
+
+	for _, dpi := range s.ByApp {
+		// Resolve numeric cat/app IDs to human-readable names.
+		category := unifi.DPICats.Get(dpi.Cat)
+		application := unifi.DPIApps.GetApp(dpi.Cat, dpi.App)
+		fillDPIMapTotals(appTotal, application, s.SourceName, s.SiteName, dpi)
+		fillDPIMapTotals(catTotal, category, s.SourceName, s.SiteName, dpi)
+
+		tags := map[string]string{
+			"category":    category,
+			"application": application,
+			"name":        s.Name,
+			"mac":         s.MAC,
+			"site_name":   s.SiteName,
+			"source":      s.SourceName,
+		}
+
+		data := map[string]float64{
+			"tx_packets": float64(dpi.TxPackets),
+			"rx_packets": float64(dpi.RxPackets),
+			"tx_bytes":   float64(dpi.TxBytes),
+			"rx_bytes":   float64(dpi.RxBytes),
+		}
+
+		metricName := metricNamespace("client_dpi")
+
+		reportGaugeForFloat64Map(r, metricName, data, tags)
+	}
+}
+
+// fillDPIMapTotals accumulates DPI transfer counters into m, keyed by
+// controller, then site, then name (category or application name).
+// This allows less processing in Datadog to produce total transfer data
+// per cat or app. Missing intermediate maps are created on demand.
+func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
+	sites, ok := m[controller]
+	if !ok {
+		sites = make(map[string]map[string]unifi.DPIData)
+		m[controller] = sites
+	}
+
+	totals, ok := sites[site]
+	if !ok {
+		totals = make(map[string]unifi.DPIData)
+		sites[site] = totals
+	}
+
+	sum := totals[name]
+	sum.TxPackets += dpi.TxPackets
+	sum.RxPackets += dpi.RxPackets
+	sum.TxBytes += dpi.TxBytes
+	sum.RxBytes += dpi.RxBytes
+	totals[name] = sum
+}
+
+func reportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
+ type all []struct {
+ kind string
+ val totalsDPImap
+ }
+
+ // This produces 7000+ metrics per site. Disabled for now.
+ if appTotal != nil {
+ appTotal = nil
+ }
+
+ // This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
+ a := all{
+ // This produces 7000+ metrics per site. Disabled for now.
+ {
+ kind: "application",
+ val: appTotal,
+ },
+ {
+ kind: "category",
+ val: catTotal,
+ },
+ }
+
+ for _, k := range a {
+ for controller, s := range k.val {
+ for site, c := range s {
+ for name, m := range c {
+ tags := map[string]string{
+ "category": "TOTAL",
+ "application": "TOTAL",
+ "name": "TOTAL",
+ "mac": "TOTAL",
+ "site_name": site,
+ "source": controller,
+ }
+ tags[k.kind] = name
+
+ data := map[string]float64{
+ "tx_packets": float64(m.TxPackets),
+ "rx_packets": float64(m.RxPackets),
+ "tx_bytes": float64(m.TxBytes),
+ "rx_bytes": float64(m.RxBytes),
+ }
+
+ metricName := metricNamespace("client_dpi")
+
+ reportGaugeForFloat64Map(r, metricName, data, tags)
+ }
+ }
+ }
+ }
+}
diff --git a/pkg/datadogunifi/datadog.go b/pkg/datadogunifi/datadog.go
new file mode 100644
index 00000000..8006f0ca
--- /dev/null
+++ b/pkg/datadogunifi/datadog.go
@@ -0,0 +1,357 @@
+// Package datadogunifi provides the methods to turn UniFi measurements into Datadog
+// data points with appropriate tags and fields.
+package datadogunifi
+
+import (
+ "reflect"
+ "time"
+
+ "github.com/DataDog/datadog-go/statsd"
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+ "golift.io/cnfg"
+)
+
+// Polling interval bounds: unset intervals default to defaultInterval and
+// anything below minimumInterval is clamped up (see setConfigDefaults).
+const (
+	defaultInterval = 30 * time.Second
+	minimumInterval = 10 * time.Second
+)
+
+// Config defines the data needed to store metrics in Datadog.
+type Config struct {
+	// Required Config
+
+	// Interval controls the collection and reporting interval.
+	Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval,omitempty" yaml:"interval,omitempty"`
+
+	// DeadPorts: save data for dead ports? ie. ports that are down or disabled.
+	DeadPorts bool `json:"dead_ports,omitempty" toml:"dead_ports,omitempty" xml:"dead_ports,omitempty" yaml:"dead_ports,omitempty"`
+
+	// Enable when true, enables this output plugin.
+	Enable *bool `json:"enable" toml:"enable" xml:"enable,attr" yaml:"enable"`
+	// Address determines how to talk to the Datadog agent.
+	Address string `json:"address" toml:"address" xml:"address,attr" yaml:"address"`
+
+	// Optional Statsd Options - mirrored from statsd.Options.
+	// Pointer fields distinguish "unset" (nil, use statsd default) from an
+	// explicit zero value; see setConfigDefaults.
+
+	// Namespace to prepend to all metrics, events and service checks name.
+	Namespace *string `json:"namespace" toml:"namespace" xml:"namespace,attr" yaml:"namespace"`
+
+	// Tags are global tags to be applied to every metrics, events and service checks.
+	Tags []string `json:"tags" toml:"tags" xml:"tags,attr" yaml:"tags"`
+
+	// MaxBytesPerPayload is the maximum number of bytes a single payload will contain.
+	// The magic value 0 will set the option to the optimal size for the transport
+	// protocol used when creating the client: 1432 for UDP and 8192 for UDS.
+	MaxBytesPerPayload *int `json:"max_bytes_per_payload" toml:"max_bytes_per_payload" xml:"max_bytes_per_payload,attr" yaml:"max_bytes_per_payload"`
+
+	// MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
+	// This option can be set to `1` to create an unbuffered client.
+	MaxMessagesPerPayload *int `json:"max_messages_per_payload" toml:"max_messages_per_payload" xml:"max_messages_per_payload,attr" yaml:"max_messages_per_payload"`
+
+	// BufferPoolSize is the size of the pool of buffers in number of buffers.
+	// The magic value 0 will set the option to the optimal size for the transport
+	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
+	BufferPoolSize *int `json:"buffer_pool_size" toml:"buffer_pool_size" xml:"buffer_pool_size,attr" yaml:"buffer_pool_size"`
+
+	// BufferFlushInterval is the interval after which the current buffer will get flushed.
+	BufferFlushInterval *cnfg.Duration `json:"buffer_flush_interval" toml:"buffer_flush_interval" xml:"buffer_flush_interval,attr" yaml:"buffer_flush_interval"`
+
+	// BufferShardCount is the number of buffer "shards" that will be used.
+	// These shards allow the use of multiple buffers at the same time to reduce
+	// lock contention.
+	BufferShardCount *int `json:"buffer_shard_count" toml:"buffer_shard_count" xml:"buffer_shard_count,attr" yaml:"buffer_shard_count"`
+
+	// SenderQueueSize is the size of the sender queue in number of buffers.
+	// The magic value 0 will set the option to the optimal size for the transport
+	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
+	SenderQueueSize *int `json:"sender_queue_size" toml:"sender_queue_size" xml:"sender_queue_size,attr" yaml:"sender_queue_size"`
+
+	// WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
+	WriteTimeoutUDS *cnfg.Duration `json:"write_timeout_uds" toml:"write_timeout_uds" xml:"write_timeout_uds,attr" yaml:"write_timeout_uds"`
+
+	// ReceiveMode determines the behavior of the client when receiving too many
+	// metrics. The client will either drop the metrics if its buffers are
+	// full (ChannelMode) or block the caller until the metric can be
+	// handled (MutexMode). By default the client uses MutexMode. This
+	// option should be set to ChannelMode only when used under very high
+	// load.
+	//
+	// MutexMode uses a mutex internally, which is much faster than a
+	// channel but causes some lock contention when used with a high number
+	// of threads. Mutexes are sharded based on the metric name, which
+	// limits mutex contention when goroutines send different metrics.
+	//
+	// ChannelMode uses a channel (of ChannelModeBufferSize size) to send
+	// metrics and drops metrics if the channel is full. Sending metrics in
+	// this mode is slower than MutexMode (because of the channel), but
+	// will not block the application. This mode is made for applications
+	// using many goroutines that send the same metrics at a very high
+	// volume. The goal is to not slow down the application, at the cost of
+	// dropping metrics and having a lower max throughput.
+	ReceiveMode *statsd.ReceivingMode `json:"receive_mode" toml:"receive_mode" xml:"receive_mode,attr" yaml:"receive_mode"`
+
+	// ChannelModeBufferSize is the size of the channel holding incoming metrics.
+	ChannelModeBufferSize *int `json:"channel_mode_buffer_size" toml:"channel_mode_buffer_size" xml:"channel_mode_buffer_size,attr" yaml:"channel_mode_buffer_size"`
+
+	// AggregationFlushInterval is the interval for the aggregator to flush metrics.
+	AggregationFlushInterval *time.Duration `json:"aggregation_flush_interval" toml:"aggregation_flush_interval" xml:"aggregation_flush_interval,attr" yaml:"aggregation_flush_interval"`
+}
+
+// Datadog allows the data to be context aware with configuration.
+type Datadog struct {
+	*Config `json:"datadog" toml:"datadog" xml:"datadog" yaml:"datadog"`
+	// options is derived from Config by setConfigDefaults.
+	options []statsd.Option // nolint
+}
+
+// DatadogUnifi is returned by New() after you provide a Config.
+type DatadogUnifi struct {
+	// Collector is the poller that supplies metrics/events; set in Run.
+	Collector poller.Collect
+	// datadog is the statsd client; created in Run.
+	datadog statsd.ClientInterface
+	// LastCheck records the time of the most recent poll tick.
+	LastCheck time.Time
+	*Datadog
+}
+
+// init registers this package as a poller output plugin named "datadog".
+func init() { // nolint: gochecknoinits
+	u := &DatadogUnifi{Datadog: &Datadog{}, LastCheck: time.Now()}
+
+	poller.NewOutput(&poller.Output{
+		Name:   "datadog",
+		Config: u.Datadog,
+		Method: u.Run,
+	})
+}
+
+// setConfigDefaults clamps the polling interval into its allowed range and
+// translates the optional Config fields into statsd client options.
+// Nil Config pointers mean "unset" and fall through to statsd defaults.
+func (u *DatadogUnifi) setConfigDefaults() {
+	if u.Interval.Duration == 0 {
+		u.Interval = cnfg.Duration{Duration: defaultInterval}
+	} else if u.Interval.Duration < minimumInterval {
+		u.Interval = cnfg.Duration{Duration: minimumInterval}
+	}
+
+	u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
+
+	u.options = make([]statsd.Option, 0)
+
+	if u.Namespace != nil {
+		u.options = append(u.options, statsd.WithNamespace(*u.Namespace))
+	}
+
+	if len(u.Tags) > 0 { // len() is safe on a nil slice; no nil check needed.
+		u.options = append(u.options, statsd.WithTags(u.Tags))
+	}
+
+	if u.MaxBytesPerPayload != nil {
+		u.options = append(u.options, statsd.WithMaxBytesPerPayload(*u.MaxBytesPerPayload))
+	}
+
+	if u.MaxMessagesPerPayload != nil {
+		u.options = append(u.options, statsd.WithMaxMessagesPerPayload(*u.MaxMessagesPerPayload))
+	}
+
+	if u.BufferPoolSize != nil {
+		u.options = append(u.options, statsd.WithBufferPoolSize(*u.BufferPoolSize))
+	}
+
+	if u.BufferFlushInterval != nil {
+		u.options = append(u.options, statsd.WithBufferFlushInterval(u.BufferFlushInterval.Duration))
+	}
+
+	if u.BufferShardCount != nil {
+		u.options = append(u.options, statsd.WithBufferShardCount(*u.BufferShardCount))
+	}
+
+	if u.SenderQueueSize != nil {
+		u.options = append(u.options, statsd.WithSenderQueueSize(*u.SenderQueueSize))
+	}
+
+	if u.WriteTimeoutUDS != nil {
+		u.options = append(u.options, statsd.WithWriteTimeoutUDS(u.WriteTimeoutUDS.Duration))
+	}
+
+	if u.ReceiveMode != nil {
+		switch *u.ReceiveMode {
+		case statsd.ChannelMode:
+			u.options = append(u.options, statsd.WithChannelMode())
+		case statsd.MutexMode:
+			u.options = append(u.options, statsd.WithMutexMode())
+		}
+	}
+
+	if u.ChannelModeBufferSize != nil {
+		u.options = append(u.options, statsd.WithChannelModeBufferSize(*u.ChannelModeBufferSize))
+	}
+
+	if u.AggregationFlushInterval != nil {
+		u.options = append(u.options, statsd.WithAggregationInterval(*u.AggregationFlushInterval))
+	}
+}
+
+// Run starts the Datadog output: it validates configuration, builds the
+// statsd client, and then blocks in PollController polling the unifi server
+// and updating Datadog. Returns nil immediately when the output is disabled.
+func (u *DatadogUnifi) Run(c poller.Collect) error {
+	// Check u == nil before touching fields: reading u.Collector (even via
+	// the logger methods) through a nil receiver would panic.
+	if u == nil || u.Config == nil {
+		return nil
+	}
+
+	if u.Enable == nil || !(*u.Enable) {
+		// Assign the collector first so this debug line actually reaches a logger.
+		u.Collector = c
+		u.LogDebugf("Datadog config is disabled, output is disabled.")
+		return nil
+	}
+
+	u.Collector = c
+	u.Logf("Datadog is configured.")
+	u.setConfigDefaults()
+
+	var err error
+	u.datadog, err = statsd.New(u.Address, u.options...)
+	if err != nil {
+		u.LogErrorf("Error configuring Datadog agent reporting: %+v", err)
+		return err
+	}
+
+	u.PollController()
+
+	return nil
+}
+
+// PollController runs forever, polling UniFi and pushing to Datadog.
+// This is started by Run() after everything is validated.
+func (u *DatadogUnifi) PollController() {
+	interval := u.Interval.Round(time.Second)
+	ticker := time.NewTicker(interval)
+	u.Logf("Everything checks out! Poller started, interval=%+v", interval)
+
+	for u.LastCheck = range ticker.C {
+		metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
+		if err != nil {
+			// %v verb added: these messages previously had no verb for err.
+			u.LogErrorf("metric fetch for Datadog failed: %v", err)
+			continue
+		}
+
+		events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
+		if err != nil {
+			u.LogErrorf("event fetch for Datadog failed: %v", err)
+			continue
+		}
+
+		report, err := u.ReportMetrics(metrics, events)
+		if err != nil {
+			// Is the agent down?
+			u.LogErrorf("unable to report metrics and events: %v", err)
+
+			if report != nil { // Guard: report may be nil on error.
+				_ = report.reportCount("unifi.collect.errors", 1, []string{})
+			}
+
+			continue
+		}
+		_ = report.reportCount("unifi.collect.success", 1, []string{})
+		u.LogDatadogReport(report)
+	}
+}
+
+// ReportMetrics batches all device and client data into datadog data points.
+// Call this after you've collected all the data you care about.
+// Returns an error if datadog statsd calls fail, otherwise returns a report.
+// NOTE(review): as written this always returns a nil error; statsd call
+// failures are accumulated in the Report, not returned here.
+func (u *DatadogUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
+	r := &Report{
+		Metrics:   m,
+		Events:    e,
+		Start:     time.Now(),
+		Counts:    &Counts{Val: make(map[item]int)},
+		Collector: u.Collector,
+		client:    u.datadog,
+	}
+	// batch all the points.
+	u.loopPoints(r)
+	r.End = time.Now()
+	r.Elapsed = r.End.Sub(r.Start)
+	_ = r.reportTiming("unifi.collector_timing", r.Elapsed, []string{})
+	return r, nil
+}
+
+// loopPoints collects all the data to immediately report to Datadog.
+// Each metric category is dispatched through switchExport, which resolves
+// the concrete unifi type; DPI tables are handled directly.
+func (u *DatadogUnifi) loopPoints(r report) {
+	m := r.metrics()
+
+	for _, s := range m.RogueAPs {
+		u.switchExport(r, s)
+	}
+
+	for _, s := range m.Sites {
+		u.switchExport(r, s)
+	}
+
+	for _, s := range m.SitesDPI {
+		// NOTE(review): unchecked type assertion — panics if SitesDPI ever
+		// holds something other than *unifi.DPITable; confirm upstream.
+		u.reportSiteDPI(r, s.(*unifi.DPITable))
+	}
+
+	for _, s := range m.Clients {
+		u.switchExport(r, s)
+	}
+
+	for _, s := range m.Devices {
+		u.switchExport(r, s)
+	}
+
+	for _, s := range r.events().Logs {
+		u.switchExport(r, s)
+	}
+
+	// Client DPI rows also feed per-app/per-category aggregate totals.
+	appTotal := make(totalsDPImap)
+	catTotal := make(totalsDPImap)
+
+	for _, s := range m.ClientsDPI {
+		u.batchClientDPI(r, s, appTotal, catTotal)
+	}
+
+	reportClientDPItotals(r, appTotal, catTotal)
+}
+
+// switchExport dispatches an untyped metric/event to its batch handler
+// based on its concrete unifi type. Unknown types are logged and dropped.
+func (u *DatadogUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
+	switch v := v.(type) {
+	case *unifi.RogueAP:
+		u.batchRogueAP(r, v)
+	case *unifi.UAP:
+		u.batchUAP(r, v)
+	case *unifi.USW:
+		u.batchUSW(r, v)
+	case *unifi.USG:
+		u.batchUSG(r, v)
+	case *unifi.UXG:
+		u.batchUXG(r, v)
+	case *unifi.UDM:
+		u.batchUDM(r, v)
+	case *unifi.Site:
+		u.reportSite(r, v)
+	case *unifi.Client:
+		u.batchClient(r, v)
+	case *unifi.Event:
+		u.batchEvent(r, v)
+	case *unifi.IDS:
+		u.batchIDS(r, v)
+	case *unifi.Alarm:
+		u.batchAlarms(r, v)
+	case *unifi.Anomaly:
+		u.batchAnomaly(r, v)
+	default:
+		u.LogErrorf("invalid export, type=%+v", reflect.TypeOf(v))
+	}
+}
+
+// LogDatadogReport writes a log message after exporting to Datadog and
+// reports collector-level summary counts back to Datadog itself.
+func (u *DatadogUnifi) LogDatadogReport(r *Report) {
+	m := r.Metrics
+	// "elapsed=" fixed; was misspelled "elapsec=" in the log line.
+	u.Logf("UniFi Metrics Recorded num_sites=%d num_sites_dpi=%d num_clients=%d num_clients_dpi=%d num_rogue_ap=%d num_devices=%d errors=%v elapsed=%v",
+		len(m.Sites),
+		len(m.SitesDPI),
+		len(m.Clients),
+		len(m.ClientsDPI),
+		len(m.RogueAPs),
+		len(m.Devices),
+		r.Errors,
+		r.Elapsed,
+	)
+	metricName := metricNamespace("collector")
+	_ = r.reportCount(metricName("num_sites"), int64(len(m.Sites)), u.Tags)
+	_ = r.reportCount(metricName("num_sites_dpi"), int64(len(m.SitesDPI)), u.Tags)
+	_ = r.reportCount(metricName("num_clients"), int64(len(m.Clients)), u.Tags)
+	_ = r.reportCount(metricName("num_clients_dpi"), int64(len(m.ClientsDPI)), u.Tags)
+	_ = r.reportCount(metricName("num_rogue_ap"), int64(len(m.RogueAPs)), u.Tags)
+	_ = r.reportCount(metricName("num_devices"), int64(len(m.Devices)), u.Tags)
+	_ = r.reportCount(metricName("num_errors"), int64(len(r.Errors)), u.Tags)
+	_ = r.reportTiming(metricName("elapsed_time"), r.Elapsed, u.Tags)
+}
diff --git a/pkg/datadogunifi/events.go b/pkg/datadogunifi/events.go
new file mode 100644
index 00000000..07200ad4
--- /dev/null
+++ b/pkg/datadogunifi/events.go
@@ -0,0 +1,143 @@
+package datadogunifi
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/unpoller/unifi"
+)
+
+// These constants are used as names for printed/logged counters
+// (tallied via report.addCount).
+const (
+	eventT = item("Event")
+	idsT   = item("IDS")
+)
+
+// batchIDS generates intrusion detection datapoints for Datadog.
+// IDS entries older than one collection interval (plus a second of slack)
+// are skipped so events already reported in a previous cycle are not
+// duplicated.
+func (u *DatadogUnifi) batchIDS(r report, i *unifi.IDS) { // nolint:dupl
+	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
+		return // The event is older than our interval, ignore it.
+	}
+
+	tagMap := map[string]string{
+		"dest_port":             strconv.Itoa(i.DestPort),
+		"src_port":              strconv.Itoa(i.SrcPort),
+		"dest_ip":               i.DestIP,
+		"dst_mac":               i.DstMAC,
+		"host":                  i.Host,
+		"msg":                   i.Msg,
+		"src_ip":                i.SrcIP,
+		"src_mac":               i.SrcMAC,
+		"dst_ip_asn":            fmt.Sprintf("%d", i.DestIPGeo.Asn),
+		"dst_ip_latitude":       fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
+		"dst_ip_longitude":      fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
+		"dst_ip_city":           i.DestIPGeo.City,
+		"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
+		"dst_ip_country_code":   i.DestIPGeo.CountryCode,
+		"dst_ip_country_name":   i.DestIPGeo.CountryName,
+		"dst_ip_organization":   i.DestIPGeo.Organization,
+		"src_ip_asn":            fmt.Sprintf("%d", i.SourceIPGeo.Asn),
+		"src_ip_latitude":       fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
+		"src_ip_longitude":      fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
+		"src_ip_city":           i.SourceIPGeo.City,
+		"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
+		"src_ip_country_code":   i.SourceIPGeo.CountryCode,
+		"src_ip_country_name":   i.SourceIPGeo.CountryName,
+		"src_ip_organization":   i.SourceIPGeo.Organization,
+		"site_name":             i.SiteName,
+		"source":                i.SourceName,
+		"in_iface":              i.InIface,
+		"event_type":            i.EventType,
+		"subsystem":             i.Subsystem,
+		"archived":              i.Archived.Txt,
+		"usg_ip":                i.USGIP,
+		"proto":                 i.Proto,
+		"key":                   i.Key,
+		"catname":               i.Catname,
+		"app_proto":             i.AppProto,
+		"action":                i.InnerAlertAction,
+	}
+
+	r.addCount(idsT)
+
+	// Empty tags are dropped before converting to the statsd tag format.
+	tagMap = cleanTags(tagMap)
+	tags := tagMapToTags(tagMap)
+	title := fmt.Sprintf("Intrusion Detection at %s from %s", i.SiteName, i.SourceName)
+	_ = r.reportEvent(title, i.Datetime, i.Msg, tags)
+	r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
+}
+
+// batchEvent generates events from UniFi for Datadog.
+// Events older than one collection interval (plus a second of slack) are
+// skipped so events already reported in a previous cycle are not duplicated.
+func (u *DatadogUnifi) batchEvent(r report, i *unifi.Event) { // nolint: funlen
+	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
+		return // The event is older than our interval, ignore it.
+	}
+
+	tagMap := map[string]string{
+		"guest":                 i.Guest,    // mac address
+		"user":                  i.User,     // mac address
+		"host":                  i.Host,     // usg device?
+		"hostname":              i.Hostname, // client name
+		"dest_port":             strconv.Itoa(i.DestPort),
+		"src_port":              strconv.Itoa(i.SrcPort),
+		"dst_ip":                i.DestIP,
+		"dst_mac":               i.DstMAC,
+		"ip":                    i.IP,
+		"src_ip":                i.SrcIP,
+		"src_mac":               i.SrcMAC,
+		"dst_ip_asn":            fmt.Sprintf("%d", i.DestIPGeo.Asn),
+		"dst_ip_latitude":       fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
+		"dst_ip_longitude":      fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
+		"dst_ip_city":           i.DestIPGeo.City,
+		"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
+		"dst_ip_country_code":   i.DestIPGeo.CountryCode,
+		"dst_ip_country_name":   i.DestIPGeo.CountryName,
+		"dst_ip_organization":   i.DestIPGeo.Organization,
+		"src_ip_asn":            fmt.Sprintf("%d", i.SourceIPGeo.Asn),
+		"src_ip_latitude":       fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
+		"src_ip_longitude":      fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
+		"src_ip_city":           i.SourceIPGeo.City,
+		"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
+		"src_ip_country_code":   i.SourceIPGeo.CountryCode,
+		"src_ip_country_name":   i.SourceIPGeo.CountryName,
+		"src_ip_organization":   i.SourceIPGeo.Organization,
+		"admin":                 i.Admin, // username
+		"site_name":             i.SiteName,
+		"source":                i.SourceName,
+		"ap_from":               i.ApFrom,
+		"ap_to":                 i.ApTo,
+		"ap":                    i.Ap,
+		"ap_name":               i.ApName,
+		"gw":                    i.Gw,
+		"gw_name":               i.GwName,
+		"sw":                    i.Sw,
+		"sw_name":               i.SwName,
+		"catname":               i.Catname,
+		"radio":                 i.Radio,
+		"radio_from":            i.RadioFrom,
+		"radio_to":              i.RadioTo,
+		"key":                   i.Key,
+		"in_iface":              i.InIface,
+		"event_type":            i.EventType,
+		"subsystem":             i.Subsystem,
+		"ssid":                  i.SSID,
+		"is_admin":              i.IsAdmin.Txt,
+		"channel":               i.Channel.Txt,
+		"channel_from":          i.ChannelFrom.Txt,
+		"channel_to":            i.ChannelTo.Txt,
+		"usg_ip":                i.USGIP,
+		"network":               i.Network,
+		"app_proto":             i.AppProto,
+		"proto":                 i.Proto,
+		"action":                i.InnerAlertAction,
+	}
+
+	r.addCount(eventT)
+
+	// Empty tags are dropped before converting to the statsd tag format.
+	tagMap = cleanTags(tagMap)
+	tags := tagMapToTags(tagMap)
+	title := fmt.Sprintf("Unifi Event at %s from %s", i.SiteName, i.SourceName)
+	_ = r.reportEvent(title, i.Datetime, i.Msg, tags)
+	r.reportInfoLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
+}
diff --git a/pkg/datadogunifi/logger.go b/pkg/datadogunifi/logger.go
new file mode 100644
index 00000000..eddf5313
--- /dev/null
+++ b/pkg/datadogunifi/logger.go
@@ -0,0 +1,22 @@
+package datadogunifi
+
+// Logf logs an informational message through the poller collector.
+// Safe to call on a nil receiver or before Run assigns the Collector:
+// without the u != nil check, reading u.Collector would panic.
+func (u *DatadogUnifi) Logf(msg string, v ...interface{}) {
+	if u != nil && u.Collector != nil {
+		u.Collector.Logf(msg, v...)
+	}
+}
+
+// LogErrorf logs an error message through the poller collector.
+// Safe to call on a nil receiver or before Run assigns the Collector.
+func (u *DatadogUnifi) LogErrorf(msg string, v ...interface{}) {
+	if u != nil && u.Collector != nil {
+		u.Collector.LogErrorf(msg, v...)
+	}
+}
+
+// LogDebugf logs a debug message through the poller collector.
+// Safe to call on a nil receiver or before Run assigns the Collector.
+func (u *DatadogUnifi) LogDebugf(msg string, v ...interface{}) {
+	if u != nil && u.Collector != nil {
+		u.Collector.LogDebugf(msg, v...)
+	}
+}
diff --git a/pkg/datadogunifi/points.go b/pkg/datadogunifi/points.go
new file mode 100644
index 00000000..60573719
--- /dev/null
+++ b/pkg/datadogunifi/points.go
@@ -0,0 +1,49 @@
+package datadogunifi
+
+import (
+ "fmt"
+ "strings"
+)
+
+// tag renders a single Datadog tag in "name:value" form.
+func tag(name string, value interface{}) string {
+	return name + ":" + fmt.Sprint(value)
+}
+
+// tagMapToTags converts a tag map into the "name:value" slice form the
+// statsd client expects. Order is unspecified (map iteration order).
+func tagMapToTags(tagMap map[string]string) []string {
+	tags := make([]string, 0, len(tagMap)) // pre-size: length is known
+	for k, v := range tagMap {
+		tags = append(tags, tag(k, v))
+	}
+
+	return tags
+}
+
+// tagMapToSimpleStrings renders a tag map as `k="v", k2="v2"` for log lines.
+// Order is unspecified (map iteration order). Uses a slice + strings.Join
+// instead of repeated string concatenation (quadratic) and a TrimRight
+// cutset (which would strip trailing commas/spaces from the data itself).
+func tagMapToSimpleStrings(tagMap map[string]string) string {
+	pairs := make([]string, 0, len(tagMap))
+	for k, v := range tagMap {
+		pairs = append(pairs, fmt.Sprintf("%s=\"%v\"", k, v))
+	}
+
+	return strings.Join(pairs, ", ")
+}
+
+// metricNamespace returns a helper that prefixes metric names with
+// "unifi.<namespace>.".
+func metricNamespace(namespace string) func(string) string {
+	prefix := "unifi." + namespace + "."
+
+	return func(name string) string {
+		return prefix + name
+	}
+}
+
+// reportGaugeForFloat64Map reports every entry of data as a gauge, all
+// sharing the same tag set.
+func reportGaugeForFloat64Map(r report, metricName func(string) string, data map[string]float64, tags map[string]string) {
+	tagList := tagMapToTags(tags) // hoisted: convert once, not once per metric
+	for name, value := range data {
+		_ = r.reportGauge(metricName(name), value, tagList)
+	}
+}
+
+// cleanTags removes any tag that is empty. The map is modified in place
+// and returned for call-chaining convenience.
+func cleanTags(tags map[string]string) map[string]string {
+	for name, value := range tags {
+		if value == "" {
+			delete(tags, name) // deleting during range is safe in Go
+		}
+	}
+
+	return tags
+}
diff --git a/pkg/datadogunifi/report.go b/pkg/datadogunifi/report.go
new file mode 100644
index 00000000..74e9ef56
--- /dev/null
+++ b/pkg/datadogunifi/report.go
@@ -0,0 +1,138 @@
+package datadogunifi
+
+import (
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-go/statsd"
+ "github.com/unpoller/unpoller/pkg/poller"
+)
+
+// Report holds the data for a single collection run and the statsd client
+// used to deliver it.
+type Report struct {
+	Metrics *poller.Metrics
+	Events  *poller.Events
+	Errors  []error
+	Counts  *Counts
+	Start   time.Time
+	End     time.Time
+	Elapsed time.Duration
+
+	Collector poller.Collect
+
+	Total  int
+	Fields int
+
+	wg sync.WaitGroup
+
+	// client delivers metrics/events to the Datadog agent.
+	client statsd.ClientInterface
+}
+
+// Counts holds counters and has a lock to deal with routines.
+type Counts struct {
+	Val map[item]int
+	sync.RWMutex
+}
+
+// report is the interface handed to the batch* methods; *Report satisfies it.
+type report interface {
+	add()
+	done()
+	error(err error)
+	metrics() *poller.Metrics
+	events() *poller.Events
+	addCount(item, ...int)
+
+	reportGauge(name string, value float64, tags []string) error
+	reportCount(name string, value int64, tags []string) error
+	reportDistribution(name string, value float64, tags []string) error
+	reportTiming(name string, value time.Duration, tags []string) error
+	reportEvent(title string, date time.Time, message string, tags []string) error
+	reportInfoLog(message string, f ...interface{})
+	reportWarnLog(message string, f ...interface{})
+	reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error
+}
+
+// add registers one unit of in-flight work on the report's wait group.
+func (r *Report) add() {
+	r.wg.Add(1)
+}
+
+// done marks one unit of in-flight work as finished.
+func (r *Report) done() {
+	r.wg.Done()
+}
+
+// metrics returns the metrics being reported.
+func (r *Report) metrics() *poller.Metrics {
+	return r.Metrics
+}
+
+// events returns the events being reported.
+func (r *Report) events() *poller.Events {
+	return r.Events
+}
+
+// item names a counter in Counts.Val (e.g. "Alarm", "Event").
+type item string
+
+// addCount increments the named counter by each value in counts, or by one
+// when no counts are given. Thread safe: guarded by the Counts mutex.
+// (A prior comment here claimed these methods were not thread safe; the
+// explicit Lock/Unlock below protects the map.)
+func (r *Report) addCount(name item, counts ...int) {
+	r.Counts.Lock()
+	defer r.Counts.Unlock()
+
+	if len(counts) == 0 {
+		r.Counts.Val[name]++
+	}
+
+	for _, c := range counts {
+		r.Counts.Val[name] += c
+	}
+}
+
+// error records a non-nil error on the report. Not thread safe.
+func (r *Report) error(err error) {
+	if err != nil {
+		r.Errors = append(r.Errors, err)
+	}
+}
+
+// reportGauge sends a gauge value to the Datadog agent at full sample rate.
+func (r *Report) reportGauge(name string, value float64, tags []string) error {
+	return r.client.Gauge(name, value, tags, 1.0)
+}
+
+// reportCount sends a count value to the Datadog agent at full sample rate.
+func (r *Report) reportCount(name string, value int64, tags []string) error {
+	return r.client.Count(name, value, tags, 1.0)
+}
+
+// reportDistribution sends a distribution sample at full sample rate.
+func (r *Report) reportDistribution(name string, value float64, tags []string) error {
+	return r.client.Distribution(name, value, tags, 1.0)
+}
+
+// reportTiming sends a timing sample at full sample rate.
+func (r *Report) reportTiming(name string, value time.Duration, tags []string) error {
+	return r.client.Timing(name, value, tags, 1.0)
+}
+
+// reportEvent sends a Datadog event; a zero date is replaced with now.
+func (r *Report) reportEvent(title string, date time.Time, message string, tags []string) error {
+	if date.IsZero() {
+		date = time.Now()
+	}
+	return r.client.Event(&statsd.Event{
+		Title:     title,
+		Text:      message,
+		Timestamp: date,
+		Tags:      tags,
+	})
+}
+
+// reportInfoLog logs an informational message through the collector.
+// f must be spread with ... — passing the slice itself made Logf treat it
+// as a single extra argument (printing "%!(EXTRA ...)" noise).
+func (r *Report) reportInfoLog(message string, f ...interface{}) {
+	r.Collector.Logf(message, f...)
+}
+
+// reportWarnLog logs a warning message through the collector.
+// NOTE(review): only Logf is used here, so warnings are logged at the same
+// level as info — confirm whether poller.Collect offers a warn-level call.
+func (r *Report) reportWarnLog(message string, f ...interface{}) {
+	r.Collector.Logf(message, f...)
+}
+
+// reportServiceCheck sends a Datadog service check stamped with the
+// current time.
+func (r *Report) reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error {
+	return r.client.ServiceCheck(&statsd.ServiceCheck{
+		Name:      name,
+		Status:    status,
+		Timestamp: time.Now(),
+		Message:   message,
+		Tags:      tags,
+	})
+}
diff --git a/pkg/datadogunifi/site.go b/pkg/datadogunifi/site.go
new file mode 100644
index 00000000..859b2f60
--- /dev/null
+++ b/pkg/datadogunifi/site.go
@@ -0,0 +1,80 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// reportSite generates Unifi Sites' datapoints for Datadog.
+// One set of gauges is emitted per health subsystem (wan, lan, wlan, …)
+// reported by the controller for the site.
+func (u *DatadogUnifi) reportSite(r report, s *unifi.Site) {
+	metricName := metricNamespace("subsystems")
+
+	for _, h := range s.Health {
+		// Tags identify the site and the subsystem this sample describes.
+		tags := []string{
+			tag("name", s.Name),
+			tag("site_name", s.SiteName),
+			tag("source", s.SourceName),
+			tag("desc", s.Desc),
+			tag("status", h.Status),
+			tag("subsystem", h.Subsystem),
+			tag("wan_ip", h.WanIP),
+			tag("gw_name", h.GwName),
+			tag("lan_ip", h.LanIP),
+		}
+
+		// Gauge values pulled straight from the subsystem health record.
+		data := map[string]float64{
+			"num_user":                 h.NumUser.Val,
+			"num_guest":                h.NumGuest.Val,
+			"num_iot":                  h.NumIot.Val,
+			"tx_bytes_r":               h.TxBytesR.Val,
+			"rx_bytes_r":               h.RxBytesR.Val,
+			"num_ap":                   h.NumAp.Val,
+			"num_adopted":              h.NumAdopted.Val,
+			"num_disabled":             h.NumDisabled.Val,
+			"num_disconnected":         h.NumDisconnected.Val,
+			"num_pending":              h.NumPending.Val,
+			"num_gw":                   h.NumGw.Val,
+			"num_sta":                  h.NumSta.Val,
+			"gw_cpu":                   h.GwSystemStats.CPU.Val,
+			"gw_mem":                   h.GwSystemStats.Mem.Val,
+			"gw_uptime":                h.GwSystemStats.Uptime.Val,
+			"latency":                  h.Latency.Val,
+			"uptime":                   h.Uptime.Val,
+			"drops":                    h.Drops.Val,
+			"xput_up":                  h.XputUp.Val,
+			"xput_down":                h.XputDown.Val,
+			"speedtest_ping":           h.SpeedtestPing.Val,
+			"speedtest_lastrun":        h.SpeedtestLastrun.Val,
+			"num_sw":                   h.NumSw.Val,
+			"remote_user_num_active":   h.RemoteUserNumActive.Val,
+			"remote_user_num_inactive": h.RemoteUserNumInactive.Val,
+			"remote_user_rx_bytes":     h.RemoteUserRxBytes.Val,
+			"remote_user_tx_bytes":     h.RemoteUserTxBytes.Val,
+			"remote_user_rx_packets":   h.RemoteUserRxPackets.Val,
+			"remote_user_tx_packets":   h.RemoteUserTxPackets.Val,
+			"num_new_alarms":           s.NumNewAlarms.Val,
+		}
+
+		// Gauge errors are intentionally dropped; the statsd client is
+		// best-effort here.
+		for name, value := range data {
+			_ = r.reportGauge(metricName(name), value, tags)
+		}
+	}
+}
+
+// reportSiteDPI reports deep-packet-inspection counters, one set per
+// application, from a site's DPI table.
+func (u *DatadogUnifi) reportSiteDPI(r report, s *unifi.DPITable) {
+	// The namespace closure does not depend on the loop variable; build
+	// it once instead of once per application row.
+	metricName := metricNamespace("sitedpi")
+
+	for _, dpi := range s.ByApp {
+		tags := []string{
+			tag("category", unifi.DPICats.Get(dpi.Cat)),
+			tag("application", unifi.DPIApps.GetApp(dpi.Cat, dpi.App)),
+			tag("site_name", s.SiteName),
+			tag("source", s.SourceName),
+		}
+
+		// Counter submission errors are intentionally ignored (best effort).
+		_ = r.reportCount(metricName("tx_packets"), dpi.TxPackets, tags)
+		_ = r.reportCount(metricName("rx_packets"), dpi.RxPackets, tags)
+		_ = r.reportCount(metricName("tx_bytes"), dpi.TxBytes, tags)
+		_ = r.reportCount(metricName("rx_bytes"), dpi.RxBytes, tags)
+	}
+}
diff --git a/pkg/datadogunifi/uap.go b/pkg/datadogunifi/uap.go
new file mode 100644
index 00000000..62ebe3ae
--- /dev/null
+++ b/pkg/datadogunifi/uap.go
@@ -0,0 +1,235 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uapT is used as a name for printed/logged counters.
+const uapT = item("UAP")
+
+// batchRogueAP generates metric points for neighboring access points.
+func (u *DatadogUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
+	// NOTE(review): this skips entries whose Age is zero; confirm that
+	// Age.Val == 0 really marks stale/never-seen APs and not just-seen ones.
+	if s.Age.Val == 0 {
+		return // only keep metrics for things that are recent.
+	}
+
+	// Identifying tags for the neighboring (rogue) AP.
+	tags := cleanTags(map[string]string{
+		"security":   s.Security,
+		"oui":        s.Oui,
+		"band":       s.Band,
+		"mac":        s.Bssid,
+		"ap_mac":     s.ApMac,
+		"radio":      s.Radio,
+		"radio_name": s.RadioName,
+		"site_name":  s.SiteName,
+		"name":       s.Essid,
+		"source":     s.SourceName,
+	})
+
+	// Radio/signal measurements for the neighbor.
+	data := map[string]float64{
+		"age":         s.Age.Val,
+		"bw":          s.Bw.Val,
+		"center_freq": s.CenterFreq.Val,
+		"channel":     float64(s.Channel),
+		"freq":        s.Freq.Val,
+		"noise":       s.Noise.Val,
+		"rssi":        s.Rssi.Val,
+		"rssi_age":    s.RssiAge.Val,
+		"signal":      s.Signal.Val,
+	}
+
+	metricName := metricNamespace("uap_rogue")
+
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+}
+
+// batchUAP generates Wireless-Access-Point datapoints for Datadog.
+// These points can be passed directly to datadog.
+// Unadopted or currently-locating devices are skipped entirely.
+func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
+	if !s.Adopted.Val || s.Locating.Val {
+		return
+	}
+
+	// Device-identifying tags shared by every gauge below.
+	tags := cleanTags(map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+		"ip":        s.IP,
+	})
+	// Merge accumulated AP stats with the shared system stats, then add
+	// the top-level device counters.
+	data := CombineFloat64(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
+	data["bytes"] = s.Bytes.Val
+	data["last_seen"] = s.LastSeen.Val
+	data["rx_bytes"] = s.RxBytes.Val
+	data["tx_bytes"] = s.TxBytes.Val
+	data["uptime"] = s.Uptime.Val
+	data["user_num_sta"] = s.UserNumSta.Val
+	data["guest_num_sta"] = s.GuestNumSta.Val
+	data["num_sta"] = s.NumSta.Val
+
+	r.addCount(uapT)
+
+	metricName := metricNamespace("uap")
+
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	// VAPs (per-SSID radios) and switch-style ports get their own series.
+	u.processVAPTable(r, tags, s.VapTable)
+	u.batchPortTable(r, tags, s.PortTable)
+}
+
+// processUAPstats flattens an AP's accumulated traffic statistics into
+// a gauge map. A nil AP record yields an empty (non-nil) map so callers
+// can merge the result unconditionally.
+func (u *DatadogUnifi) processUAPstats(ap *unifi.Ap) map[string]float64 {
+	if ap == nil {
+		return map[string]float64{}
+	}
+
+	// Accumulative Statistics.
+	return map[string]float64{
+		"stat_user-rx_packets":  ap.UserRxPackets.Val,
+		"stat_guest-rx_packets": ap.GuestRxPackets.Val,
+		"stat_rx_packets":       ap.RxPackets.Val,
+		"stat_user-rx_bytes":    ap.UserRxBytes.Val,
+		"stat_guest-rx_bytes":   ap.GuestRxBytes.Val,
+		"stat_rx_bytes":         ap.RxBytes.Val,
+		"stat_user-rx_errors":   ap.UserRxErrors.Val,
+		"stat_guest-rx_errors":  ap.GuestRxErrors.Val,
+		"stat_rx_errors":        ap.RxErrors.Val,
+		"stat_user-rx_dropped":  ap.UserRxDropped.Val,
+		"stat_guest-rx_dropped": ap.GuestRxDropped.Val,
+		"stat_rx_dropped":       ap.RxDropped.Val,
+		"stat_user-rx_crypts":   ap.UserRxCrypts.Val,
+		"stat_guest-rx_crypts":  ap.GuestRxCrypts.Val,
+		"stat_rx_crypts":        ap.RxCrypts.Val,
+		"stat_user-rx_frags":    ap.UserRxFrags.Val,
+		"stat_guest-rx_frags":   ap.GuestRxFrags.Val,
+		"stat_rx_frags":         ap.RxFrags.Val,
+		"stat_user-tx_packets":  ap.UserTxPackets.Val,
+		"stat_guest-tx_packets": ap.GuestTxPackets.Val,
+		"stat_tx_packets":       ap.TxPackets.Val,
+		"stat_user-tx_bytes":    ap.UserTxBytes.Val,
+		"stat_guest-tx_bytes":   ap.GuestTxBytes.Val,
+		"stat_tx_bytes":         ap.TxBytes.Val,
+		"stat_user-tx_errors":   ap.UserTxErrors.Val,
+		"stat_guest-tx_errors":  ap.GuestTxErrors.Val,
+		"stat_tx_errors":        ap.TxErrors.Val,
+		"stat_user-tx_dropped":  ap.UserTxDropped.Val,
+		"stat_guest-tx_dropped": ap.GuestTxDropped.Val,
+		"stat_tx_dropped":       ap.TxDropped.Val,
+		"stat_user-tx_retries":  ap.UserTxRetries.Val,
+		"stat_guest-tx_retries": ap.GuestTxRetries.Val,
+	}
+}
+
+// processVAPTable creates points for Wifi Radios. This works with several types of UAP-capable devices.
+// t carries the parent device's identifying tags (name/site_name/source).
+func (u *DatadogUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) { // nolint: funlen
+	// Loop-invariant: build the namespace closure once, not per VAP.
+	metricName := metricNamespace("uap_vaps")
+
+	for _, s := range vt {
+		tags := map[string]string{
+			"device_name": t["name"],
+			"site_name":   t["site_name"],
+			"source":      t["source"],
+			"ap_mac":      s.ApMac,
+			"bssid":       s.Bssid,
+			"id":          s.ID,
+			"name":        s.Name,
+			"radio_name":  s.RadioName,
+			"radio":       s.Radio,
+			"essid":       s.Essid,
+			"site_id":     s.SiteID,
+			"usage":       s.Usage,
+			"state":       s.State,
+			"is_guest":    s.IsGuest.Txt,
+		}
+		data := map[string]float64{
+			"ccq":                   float64(s.Ccq),
+			"mac_filter_rejections": float64(s.MacFilterRejections),
+			"num_satisfaction_sta":  s.NumSatisfactionSta.Val,
+			"avg_client_signal":     s.AvgClientSignal.Val,
+			"satisfaction":          s.Satisfaction.Val,
+			"satisfaction_now":      s.SatisfactionNow.Val,
+			"num_sta":               float64(s.NumSta),
+			"channel":               s.Channel.Val,
+			"rx_bytes":              s.RxBytes.Val,
+			"rx_crypts":             s.RxCrypts.Val,
+			"rx_dropped":            s.RxDropped.Val,
+			"rx_errors":             s.RxErrors.Val,
+			"rx_frags":              s.RxFrags.Val,
+			"rx_nwids":              s.RxNwids.Val,
+			"rx_packets":            s.RxPackets.Val,
+			"tx_bytes":              s.TxBytes.Val,
+			"tx_dropped":            s.TxDropped.Val,
+			"tx_errors":             s.TxErrors.Val,
+			"tx_packets":            s.TxPackets.Val,
+			"tx_power":              s.TxPower.Val,
+			"tx_retries":            s.TxRetries.Val,
+			"tx_combined_retries":   s.TxCombinedRetries.Val,
+			"tx_data_mpdu_bytes":    s.TxDataMpduBytes.Val,
+			"tx_rts_retries":        s.TxRtsRetries.Val,
+			"tx_success":            s.TxSuccess.Val,
+			"tx_total":              s.TxTotal.Val,
+			"tx_tcp_goodbytes":      s.TxTCPStats.Goodbytes.Val,
+			"tx_tcp_lat_avg":        s.TxTCPStats.LatAvg.Val,
+			"tx_tcp_lat_max":        s.TxTCPStats.LatMax.Val,
+			"tx_tcp_lat_min":        s.TxTCPStats.LatMin.Val,
+			"rx_tcp_goodbytes":      s.RxTCPStats.Goodbytes.Val,
+			"rx_tcp_lat_avg":        s.RxTCPStats.LatAvg.Val,
+			"rx_tcp_lat_max":        s.RxTCPStats.LatMax.Val,
+			"rx_tcp_lat_min":        s.RxTCPStats.LatMin.Val,
+			"wifi_tx_latency_mov_avg":   s.WifiTxLatencyMov.Avg.Val,
+			"wifi_tx_latency_mov_max":   s.WifiTxLatencyMov.Max.Val,
+			"wifi_tx_latency_mov_min":   s.WifiTxLatencyMov.Min.Val,
+			"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
+			// Fixed metric-name typo: was "wifi_tx_latency_mov_cuont".
+			// Dashboards querying the old name must be updated.
+			"wifi_tx_latency_mov_count": s.WifiTxLatencyMov.TotalCount.Val,
+		}
+
+		reportGaugeForFloat64Map(r, metricName, data, tags)
+	}
+}
+
+// processRadTable creates points for each radio on a device. Per-radio
+// config comes from rt; live counters are joined in from the matching
+// (by radio name) entry of rts. t carries the parent device's tags.
+func (u *DatadogUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
+	// Loop-invariant: build the namespace closure once, not per radio.
+	metricName := metricNamespace("uap_radios")
+
+	for _, p := range rt {
+		tags := map[string]string{
+			"device_name": t["name"],
+			"site_name":   t["site_name"],
+			"source":      t["source"],
+			"channel":     p.Channel.Txt,
+			"radio":       p.Radio,
+			"ht":          p.Ht.Txt,
+		}
+		data := map[string]float64{
+			"current_antenna_gain": p.CurrentAntennaGain.Val,
+			"max_txpower":          p.MaxTxpower.Val,
+			"min_txpower":          p.MinTxpower.Val,
+			"nss":                  p.Nss.Val,
+			"radio_caps":           p.RadioCaps.Val,
+		}
+
+		// Renamed the stats loop variable (was "t", which shadowed the
+		// device-tag parameter t above).
+		for _, rs := range rts {
+			if rs.Name == p.Name {
+				data["ast_be_xmit"] = rs.AstBeXmit.Val
+				data["channel"] = rs.Channel.Val
+				data["cu_self_rx"] = rs.CuSelfRx.Val
+				data["cu_self_tx"] = rs.CuSelfTx.Val
+				data["cu_total"] = rs.CuTotal.Val
+				data["ext_channel"] = rs.Extchannel.Val
+				data["gain"] = rs.Gain.Val
+				data["guest_num_sta"] = rs.GuestNumSta.Val
+				data["num_sta"] = rs.NumSta.Val
+				data["tx_packets"] = rs.TxPackets.Val
+				data["tx_power"] = rs.TxPower.Val
+				data["tx_retries"] = rs.TxRetries.Val
+				data["user_num_sta"] = rs.UserNumSta.Val
+
+				break // only the first matching stats entry is used.
+			}
+		}
+
+		reportGaugeForFloat64Map(r, metricName, data, tags)
+	}
+}
diff --git a/pkg/datadogunifi/udm.go b/pkg/datadogunifi/udm.go
new file mode 100644
index 00000000..796beb95
--- /dev/null
+++ b/pkg/datadogunifi/udm.go
@@ -0,0 +1,196 @@
+package datadogunifi
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/unpoller/unifi"
+)
+
+// udmT is used as a name for printed/logged counters.
+const udmT = item("UDM")
+
+// Combine merges any number of maps into a single new map. On key
+// collisions the later map wins, so order matters: this will delete
+// things if not used with caution.
+func Combine(in ...map[string]interface{}) map[string]interface{} {
+	out := make(map[string]interface{})
+
+	for _, m := range in {
+		for k, v := range m {
+			out[k] = v
+		}
+	}
+
+	return out
+}
+
+// CombineFloat64 merges any number of float64 maps into a single new
+// map. On key collisions the later map wins, so order matters: this
+// will delete things if not used with caution.
+func CombineFloat64(in ...map[string]float64) map[string]float64 {
+	out := make(map[string]float64)
+
+	for _, m := range in {
+		for k, v := range m {
+			out[k] = v
+		}
+	}
+
+	return out
+}
+
+// batchSysStats is used by all device types. It flattens the common
+// load/memory/CPU stats and any named temperature sensors into gauges.
+func (u *DatadogUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]float64 {
+	m := map[string]float64{
+		"loadavg_1":     s.Loadavg1.Val,
+		"loadavg_5":     s.Loadavg5.Val,
+		"loadavg_15":    s.Loadavg15.Val,
+		"mem_used":      s.MemUsed.Val,
+		"mem_buffer":    s.MemBuffer.Val,
+		"mem_total":     s.MemTotal.Val,
+		"cpu":           ss.CPU.Val,
+		"mem":           ss.Mem.Val,
+		"system_uptime": ss.Uptime.Val,
+	}
+
+	for k, v := range ss.Temps {
+		// Sensor values look like "<number> <unit…>": parse the leading
+		// integer; a parse failure yields 0 and the sensor is skipped below.
+		temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
+		// Sanitize the sensor name for use as a metric key: strip
+		// parentheses and replace spaces with underscores.
+		k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
+
+		if temp != 0 && k != "" {
+			m["temp_"+strings.ToLower(k)] = float64(temp)
+		}
+	}
+
+	return m
+}
+
+// batchUDMtemps maps hardware temperature sensors into gauge values
+// keyed "temp_<sensor name>".
+func (u *DatadogUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]float64 {
+	out := make(map[string]float64, len(temps))
+
+	for _, sensor := range temps {
+		out["temp_"+sensor.Name] = sensor.Value
+	}
+
+	return out
+}
+
+// batchUDMstorage maps each storage device's size, used bytes, and
+// percent-used into gauges keyed by device name.
+func (u *DatadogUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]float64 {
+	output := make(map[string]float64)
+
+	for _, t := range storage {
+		if t == nil {
+			continue // defensive: skip absent entries in the pointer slice.
+		}
+
+		output["storage_"+t.Name+"_size"] = t.Size.Val
+		output["storage_"+t.Name+"_used"] = t.Used.Val
+
+		// Report a percentage only for sane values. Note: the previous
+		// strict "<" comparison reported 0% for a completely full disk
+		// (Used == Size); "<=" reports 100% as expected.
+		if t.Size.Val > 0 && t.Used.Val > 0 && t.Used.Val <= t.Size.Val {
+			output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
+		} else {
+			output["storage_"+t.Name+"_pct"] = 0
+		}
+	}
+
+	return output
+}
+
+// batchUDM generates Unifi Gateway datapoints for Datadog.
+// These points can be passed directly to datadog.
+// A UDM bundles a gateway (usg), a switch (usw), and — on non-Pro
+// models — an access point (uap); each facet is reported under its own
+// metric namespace below. Unadopted/locating devices are skipped.
+func (u *DatadogUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
+	if !s.Adopted.Val || s.Locating.Val {
+		return
+	}
+
+	// Gateway (usg) facet.
+	tags := cleanTags(map[string]string{
+		"source":        s.SourceName,
+		"mac":           s.Mac,
+		"site_name":     s.SiteName,
+		"name":          s.Name,
+		"version":       s.Version,
+		"model":         s.Model,
+		"serial":        s.Serial,
+		"type":          s.Type,
+		"ip":            s.IP,
+		"license_state": s.LicenseState,
+	})
+	data := CombineFloat64(
+		u.batchUDMstorage(s.Storage),
+		u.batchUDMtemps(s.Temperatures),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]float64{
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"guest_num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user_num_sta":  s.UserNumSta.Val,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+		},
+	)
+
+	r.addCount(udmT)
+	metricName := metricNamespace("usg")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+
+	// Switch (usw) facet: same device, rebuilt without gateway-only tags.
+	tags = cleanTags(map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+		"ip":        s.IP,
+	})
+	data = CombineFloat64(
+		u.batchUSWstat(s.Stat.Sw),
+		map[string]float64{
+			"guest_num_sta": s.GuestNumSta.Val,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+		})
+
+	metricName = metricNamespace("usw")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
+
+	if s.Stat.Ap == nil {
+		return // we're done now. the following code process UDM (non-pro) UAP data.
+	}
+
+	// Access-point (uap) facet for non-Pro UDMs.
+	tags = cleanTags(map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+		"ip":        s.IP,
+	})
+	data = u.processUAPstats(s.Stat.Ap)
+	data["bytes"] = s.Bytes.Val
+	data["last_seen"] = s.LastSeen.Val
+	data["rx_bytes"] = s.RxBytes.Val
+	data["tx_bytes"] = s.TxBytes.Val
+	data["uptime"] = s.Uptime.Val
+	data["state"] = s.State.Val
+	data["user_num_sta"] = s.UserNumSta.Val
+	data["guest_num_sta"] = s.GuestNumSta.Val
+	data["num_sta"] = s.NumSta.Val
+
+	metricName = metricNamespace("uap")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	// NOTE(review): RadioTable and RadioTableStats are dereferenced with
+	// no nil check; confirm they are always populated whenever Stat.Ap
+	// is non-nil, otherwise this panics.
+	u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
+	u.processVAPTable(r, tags, *s.VapTable)
+}
diff --git a/pkg/datadogunifi/usg.go b/pkg/datadogunifi/usg.go
new file mode 100644
index 00000000..95065e27
--- /dev/null
+++ b/pkg/datadogunifi/usg.go
@@ -0,0 +1,155 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// usgT is used as a name for printed/logged counters.
+const usgT = item("USG")
+
+// batchUSG generates Unifi Gateway datapoints for Datadog.
+// These points can be passed directly to datadog.
+// Unadopted or currently-locating gateways are skipped.
+func (u *DatadogUnifi) batchUSG(r report, s *unifi.USG) {
+	if !s.Adopted.Val || s.Locating.Val {
+		return
+	}
+
+	// Device-identifying tags shared by every gauge below.
+	tags := map[string]string{
+		"mac":           s.Mac,
+		"site_name":     s.SiteName,
+		"source":        s.SourceName,
+		"name":          s.Name,
+		"version":       s.Version,
+		"model":         s.Model,
+		"serial":        s.Serial,
+		"type":          s.Type,
+		"ip":            s.IP,
+		"license_state": s.LicenseState,
+	}
+	// Merge temperature, system, and gateway/WAN stats with the
+	// top-level device counters.
+	data := CombineFloat64(
+		u.batchUDMtemps(s.Temperatures),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		map[string]float64{
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"guest_num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user_num_sta":  s.UserNumSta.Val,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+		},
+	)
+
+	r.addCount(usgT)
+
+	metricName := metricNamespace("usg")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	// Per-network and per-WAN-port series.
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+}
+
+// batchUSGstats flattens gateway traffic, uplink, and speedtest stats
+// into a gauge map.
+// NOTE(review): when gw is nil the uplink and speedtest values are
+// dropped too, even though ss and ul are available — confirm that is
+// intentional.
+func (u *DatadogUnifi) batchUSGstats(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]float64 {
+	if gw == nil {
+		return map[string]float64{}
+	}
+
+	return map[string]float64{
+		"uplink_latency":                 ul.Latency.Val,
+		"uplink_speed":                   ul.Speed.Val,
+		"speedtest_status_latency":       ss.Latency.Val,
+		"speedtest_status_runtime":       ss.Runtime.Val,
+		"speedtest_status_rundate":       ss.Rundate.Val,
+		"speedtest_status_ping":          ss.StatusPing.Val,
+		"speedtest_status_xput_download": ss.XputDownload.Val,
+		"speedtest_status_xput_upload":   ss.XputUpload.Val,
+		"lan_rx_bytes":                   gw.LanRxBytes.Val,
+		"lan_rx_packets":                 gw.LanRxPackets.Val,
+		"lan_tx_bytes":                   gw.LanTxBytes.Val,
+		"lan_tx_packets":                 gw.LanTxPackets.Val,
+		"lan_rx_dropped":                 gw.LanRxDropped.Val,
+	}
+}
+
+// batchUSGwans reports one gauge set per WAN interface that is up.
+// tags carries the parent device's identifying tags.
+func (u *DatadogUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
+	// Loop-invariant: build the namespace closure once, not per WAN.
+	metricName := metricNamespace("usg.wan_ports")
+
+	for _, wan := range wans {
+		if !wan.Up.Val {
+			continue // only record interfaces that are up.
+		}
+
+		// Renamed from "tags" to avoid shadowing the parameter.
+		wanTags := cleanTags(map[string]string{
+			"device_name": tags["name"],
+			"site_name":   tags["site_name"],
+			"source":      tags["source"],
+			"ip":          wan.IP,
+			"purpose":     wan.Name,
+			"mac":         wan.Mac,
+			"ifname":      wan.Ifname,
+			"type":        wan.Type,
+			"up":          wan.Up.Txt,
+			"enabled":     wan.Enable.Txt,
+			"gateway":     wan.Gateway,
+		})
+
+		// Encode the duplex flag as 0/1 for the gauge map.
+		fullDuplex := 0.0
+		if wan.FullDuplex.Val {
+			fullDuplex = 1.0
+		}
+		data := map[string]float64{
+			"bytes_r":      wan.BytesR.Val,
+			"full_duplex":  fullDuplex,
+			"max_speed":    wan.MaxSpeed.Val,
+			"rx_bytes":     wan.RxBytes.Val,
+			"rx_bytes_r":   wan.RxBytesR.Val,
+			"rx_dropped":   wan.RxDropped.Val,
+			"rx_errors":    wan.RxErrors.Val,
+			"rx_broadcast": wan.RxBroadcast.Val,
+			"rx_multicast": wan.RxMulticast.Val,
+			"rx_packets":   wan.RxPackets.Val,
+			"speed":        wan.Speed.Val,
+			"tx_bytes":     wan.TxBytes.Val,
+			"tx_bytes_r":   wan.TxBytesR.Val,
+			"tx_dropped":   wan.TxDropped.Val,
+			"tx_errors":    wan.TxErrors.Val,
+			"tx_packets":   wan.TxPackets.Val,
+			"tx_broadcast": wan.TxBroadcast.Val,
+			"tx_multicast": wan.TxMulticast.Val,
+		}
+
+		reportGaugeForFloat64Map(r, metricName, data, wanTags)
+	}
+}
+
+// batchNetTable reports one gauge set per configured network (VLAN/LAN)
+// on the gateway. tags carries the parent device's identifying tags.
+func (u *DatadogUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
+	// Loop-invariant: build the namespace closure once, not per network.
+	metricName := metricNamespace("usg.networks")
+
+	for _, p := range nt {
+		// Renamed from "tags" to avoid shadowing the parameter.
+		netTags := cleanTags(map[string]string{
+			"device_name": tags["name"],
+			"site_name":   tags["site_name"],
+			"source":      tags["source"],
+			"up":          p.Up.Txt,
+			"enabled":     p.Enabled.Txt,
+			"ip":          p.IP,
+			"mac":         p.Mac,
+			"name":        p.Name,
+			"domain_name": p.DomainName,
+			"purpose":     p.Purpose,
+			"is_guest":    p.IsGuest.Txt,
+		})
+		data := map[string]float64{
+			"num_sta":    p.NumSta.Val,
+			"rx_bytes":   p.RxBytes.Val,
+			"rx_packets": p.RxPackets.Val,
+			"tx_bytes":   p.TxBytes.Val,
+			"tx_packets": p.TxPackets.Val,
+		}
+
+		reportGaugeForFloat64Map(r, metricName, data, netTags)
+	}
+}
diff --git a/pkg/datadogunifi/usw.go b/pkg/datadogunifi/usw.go
new file mode 100644
index 00000000..aac3ae0b
--- /dev/null
+++ b/pkg/datadogunifi/usw.go
@@ -0,0 +1,136 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uswT is used as a name for printed/logged counters.
+const uswT = item("USW")
+
+// batchUSW generates Unifi Switch datapoints for Datadog.
+// These points can be passed directly to datadog.
+// Unadopted or currently-locating switches are skipped.
+func (u *DatadogUnifi) batchUSW(r report, s *unifi.USW) {
+	if !s.Adopted.Val || s.Locating.Val {
+		return
+	}
+
+	// Device-identifying tags shared by every gauge below.
+	tags := cleanTags(map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+		"ip":        s.IP,
+	})
+	// Merge accumulated switch stats and system stats with the
+	// top-level device counters.
+	data := CombineFloat64(
+		u.batchUSWstat(s.Stat.Sw),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]float64{
+			"guest_num_sta":       s.GuestNumSta.Val,
+			"bytes":               s.Bytes.Val,
+			"fan_level":           s.FanLevel.Val,
+			"general_temperature": s.GeneralTemperature.Val,
+			"last_seen":           s.LastSeen.Val,
+			"rx_bytes":            s.RxBytes.Val,
+			"tx_bytes":            s.TxBytes.Val,
+			"uptime":              s.Uptime.Val,
+			"state":               s.State.Val,
+			"user_num_sta":        s.UserNumSta.Val,
+		})
+
+	r.addCount(uswT)
+	metricName := metricNamespace("usw")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	// Per-port series.
+	u.batchPortTable(r, tags, s.PortTable)
+}
+
+// batchUSWstat flattens a switch's accumulated traffic statistics into
+// a gauge map. A nil record yields an empty (non-nil) map so callers
+// can merge the result unconditionally.
+func (u *DatadogUnifi) batchUSWstat(sw *unifi.Sw) map[string]float64 {
+	if sw == nil {
+		return map[string]float64{}
+	}
+
+	return map[string]float64{
+		"stat_bytes": sw.Bytes.Val,
+		"stat_rx_bytes":   sw.RxBytes.Val,
+		"stat_rx_crypts":  sw.RxCrypts.Val,
+		"stat_rx_dropped": sw.RxDropped.Val,
+		"stat_rx_errors":  sw.RxErrors.Val,
+		"stat_rx_frags":   sw.RxFrags.Val,
+		// Fixed copy-paste bug: this previously read sw.TxPackets.Val,
+		// so rx packets always mirrored tx packets.
+		"stat_rx_packets": sw.RxPackets.Val,
+		"stat_tx_bytes":   sw.TxBytes.Val,
+		"stat_tx_dropped": sw.TxDropped.Val,
+		"stat_tx_errors":  sw.TxErrors.Val,
+		"stat_tx_packets": sw.TxPackets.Val,
+		"stat_tx_retries": sw.TxRetries.Val,
+	}
+}
+
+// batchPortTable reports one gauge set per switch port. Ports that are
+// down or disabled are skipped unless the DeadPorts option is set.
+// t carries the parent device's identifying tags.
+//nolint:funlen
+func (u *DatadogUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
+	for _, p := range pt {
+		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
+			continue // only record UP ports.
+		}
+
+		tags := cleanTags(map[string]string{
+			"site_name":      t["site_name"],
+			"device_name":    t["name"],
+			"source":         t["source"],
+			"type":           t["type"],
+			"name":           p.Name,
+			"poe_mode":       p.PoeMode,
+			"port_poe":       p.PortPoe.Txt,
+			"port_idx":       p.PortIdx.Txt,
+			"port_id":        t["name"] + " Port " + p.PortIdx.Txt,
+			"poe_enable":     p.PoeEnable.Txt,
+			"flow_ctrl_rx":   p.FlowctrlRx.Txt,
+			"flow_ctrl_tx":   p.FlowctrlTx.Txt,
+			"media":          p.Media,
+			"has_sfp":        p.SFPFound.Txt,
+			"sfp_compliance": p.SFPCompliance,
+			"sfp_serial":     p.SFPSerial,
+			"sfp_vendor":     p.SFPVendor,
+			"sfp_part":       p.SFPPart,
+		})
+		data := map[string]float64{
+			"bytes_r":       p.BytesR.Val,
+			"rx_broadcast":  p.RxBroadcast.Val,
+			"rx_bytes":      p.RxBytes.Val,
+			"rx_bytes_r":    p.RxBytesR.Val,
+			"rx_dropped":    p.RxDropped.Val,
+			"rx_errors":     p.RxErrors.Val,
+			"rx_multicast":  p.RxMulticast.Val,
+			"rx_packets":    p.RxPackets.Val,
+			"speed":         p.Speed.Val,
+			"stp_path_cost": p.StpPathcost.Val,
+			"tx_broadcast":  p.TxBroadcast.Val,
+			"tx_bytes":      p.TxBytes.Val,
+			"tx_bytes_r":    p.TxBytesR.Val,
+			"tx_dropped":    p.TxDropped.Val,
+			"tx_errors":     p.TxErrors.Val,
+			"tx_multicast":  p.TxMulticast.Val,
+			"tx_packets":    p.TxPackets.Val,
+		}
+
+		// PoE electrical readings only exist on PoE-capable, PoE-enabled ports.
+		if p.PoeEnable.Val && p.PortPoe.Val {
+			data["poe_current"] = p.PoeCurrent.Val
+			data["poe_power"] = p.PoePower.Val
+			data["poe_voltage"] = p.PoeVoltage.Val
+		}
+
+		// SFP module diagnostics only exist when a module is inserted.
+		if p.SFPFound.Val {
+			data["sfp_current"] = p.SFPCurrent.Val
+			data["sfp_voltage"] = p.SFPVoltage.Val
+			data["sfp_temperature"] = p.SFPTemperature.Val
+			data["sfp_tx_power"] = p.SFPTxpower.Val
+			data["sfp_rx_power"] = p.SFPRxpower.Val
+		}
+
+		metricName := metricNamespace("usw.ports")
+		reportGaugeForFloat64Map(r, metricName, data, tags)
+	}
+}
diff --git a/pkg/datadogunifi/uxg.go b/pkg/datadogunifi/uxg.go
new file mode 100644
index 00000000..c79f5e05
--- /dev/null
+++ b/pkg/datadogunifi/uxg.go
@@ -0,0 +1,83 @@
+package datadogunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uxgT is used as a name for printed/logged counters.
+const uxgT = item("UXG")
+
+// batchUXG generates 10Gb Unifi Gateway datapoints for Datadog.
+// These points can be passed directly to datadog.
+// A UXG bundles a gateway (usg) and a switch (usw); each facet is
+// reported under its own namespace. Unadopted/locating devices are skipped.
+func (u *DatadogUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
+	if !s.Adopted.Val || s.Locating.Val {
+		return
+	}
+
+	// Gateway (usg) facet.
+	tags := cleanTags(map[string]string{
+		"source":        s.SourceName,
+		"mac":           s.Mac,
+		"site_name":     s.SiteName,
+		"name":          s.Name,
+		"version":       s.Version,
+		"model":         s.Model,
+		"serial":        s.Serial,
+		"type":          s.Type,
+		"ip":            s.IP,
+		"license_state": s.LicenseState,
+	})
+	data := CombineFloat64(
+		u.batchUDMstorage(s.Storage),
+		u.batchUDMtemps(s.Temperatures),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]float64{
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"guest_num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user_num_sta":  s.UserNumSta.Val,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+		},
+	)
+
+	r.addCount(uxgT)
+
+	metricName := metricNamespace("usg")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+
+	// Switch (usw) facet: same device, rebuilt without gateway-only tags.
+	tags = cleanTags(map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+		"ip":        s.IP,
+	})
+	data = CombineFloat64(
+		u.batchUSWstat(s.Stat.Sw),
+		map[string]float64{
+			"guest_num_sta": s.GuestNumSta.Val,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+		})
+
+	metricName = metricNamespace("usw")
+	reportGaugeForFloat64Map(r, metricName, data, tags)
+
+	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
+}
diff --git a/pkg/influxunifi/LICENSE b/pkg/influxunifi/LICENSE
new file mode 100644
index 00000000..870eeecd
--- /dev/null
+++ b/pkg/influxunifi/LICENSE
@@ -0,0 +1,21 @@
+MIT LICENSE.
+Copyright (c) 2018-2021 David Newhall II
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/influxunifi/README.md b/pkg/influxunifi/README.md
new file mode 100644
index 00000000..9edb84aa
--- /dev/null
+++ b/pkg/influxunifi/README.md
@@ -0,0 +1,5 @@
+# influxunifi
+
+## UnPoller Output Plugin
+
+Stores UniFi controller data, collected by the poller, in InfluxDB.
diff --git a/pkg/influxunifi/alarms.go b/pkg/influxunifi/alarms.go
new file mode 100644
index 00000000..510cd343
--- /dev/null
+++ b/pkg/influxunifi/alarms.go
@@ -0,0 +1,87 @@
+package influxunifi
+
+import (
+ "time"
+
+ "github.com/unpoller/unifi"
+)
+
+const (
+	// Counter names for logged totals.
+	alarmT   = item("Alarm")
+	anomalyT = item("Anomaly")
+)
+
+// batchAlarms generates alarm datapoints for InfluxDB.
+// Alarms older than one polling interval (plus a second of slack) are
+// dropped so the same alarm is not re-recorded every cycle.
+func (u *InfluxUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dupl
+	if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
+		return // The event is older than our interval, ignore it.
+	}
+
+	// High-cardinality values (IPs, ports, geo data) go in fields;
+	// bounded values go in tags below.
+	fields := map[string]interface{}{
+		"dest_port":            event.DestPort,
+		"src_port":             event.SrcPort,
+		"dest_ip":              event.DestIP,
+		"dst_mac":              event.DstMAC,
+		"host":                 event.Host,
+		"msg":                  event.Msg,
+		"src_ip":               event.SrcIP,
+		"src_mac":              event.SrcMAC,
+		"dstip_asn":            event.DestIPGeo.Asn,
+		"dstip_latitude":       event.DestIPGeo.Latitude,
+		"dstip_longitude":      event.DestIPGeo.Longitude,
+		"dstip_city":           event.DestIPGeo.City,
+		"dstip_continent_code": event.DestIPGeo.ContinentCode,
+		"dstip_country_code":   event.DestIPGeo.CountryCode,
+		"dstip_country_name":   event.DestIPGeo.CountryName,
+		"dstip_organization":   event.DestIPGeo.Organization,
+		"srcip_asn":            event.SourceIPGeo.Asn,
+		"srcip_latitude":       event.SourceIPGeo.Latitude,
+		"srcip_longitude":      event.SourceIPGeo.Longitude,
+		"srcip_city":           event.SourceIPGeo.City,
+		"srcip_continent_code": event.SourceIPGeo.ContinentCode,
+		"srcip_country_code":   event.SourceIPGeo.CountryCode,
+		"srcip_country_name":   event.SourceIPGeo.CountryName,
+		"srcip_organization":   event.SourceIPGeo.Organization,
+	}
+
+	r.addCount(alarmT)
+	r.send(&metric{
+		Table:  "unifi_alarm",
+		TS:     event.Datetime,
+		Fields: cleanFields(fields),
+		Tags: cleanTags(map[string]string{
+			"site_name":  event.SiteName,
+			"source":     event.SourceName,
+			"in_iface":   event.InIface,
+			"event_type": event.EventType,
+			"subsystem":  event.Subsystem,
+			"archived":   event.Archived.Txt,
+			"usgip":      event.USGIP,
+			"proto":      event.Proto,
+			"key":        event.Key,
+			"catname":    event.Catname,
+			"app_proto":  event.AppProto,
+			"action":     event.InnerAlertAction,
+		}),
+	})
+}
+
+// batchAnomaly generates Anomalies from UniFi for InfluxDB.
+// Anomalies older than one polling interval (plus a second of slack)
+// are dropped so they are not re-recorded every cycle.
+func (u *InfluxUnifi) batchAnomaly(r report, event *unifi.Anomaly) {
+	if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
+		return // Stale: predates the current polling window.
+	}
+
+	tags := cleanTags(map[string]string{
+		"application": "unifi_anomaly",
+		"source":      event.SourceName,
+		"site_name":   event.SiteName,
+		"device_mac":  event.DeviceMAC,
+	})
+
+	r.addCount(anomalyT)
+	r.send(&metric{
+		TS:     event.Datetime,
+		Table:  "unifi_anomaly",
+		Fields: map[string]interface{}{"msg": event.Anomaly},
+		Tags:   tags,
+	})
+}
diff --git a/pkg/influxunifi/clients.go b/pkg/influxunifi/clients.go
new file mode 100644
index 00000000..bcf11dff
--- /dev/null
+++ b/pkg/influxunifi/clients.go
@@ -0,0 +1,183 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// batchClient generates Unifi Client datapoints for InfluxDB.
+// These points can be passed directly to influx.
+func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
+ tags := map[string]string{
+ "mac": s.Mac,
+ "site_name": s.SiteName,
+ "source": s.SourceName,
+ "ap_name": s.ApName,
+ "gw_name": s.GwName,
+ "sw_name": s.SwName,
+ "oui": s.Oui,
+ "radio_name": s.RadioName,
+ "radio": s.Radio,
+ "radio_proto": s.RadioProto,
+ "name": s.Name,
+ "fixed_ip": s.FixedIP,
+ "sw_port": s.SwPort.Txt,
+ "os_class": s.OsClass.Txt,
+ "os_name": s.OsName.Txt,
+ "dev_cat": s.DevCat.Txt,
+ "dev_id": s.DevID.Txt,
+ "dev_vendor": s.DevVendor.Txt,
+ "dev_family": s.DevFamily.Txt,
+ "is_wired": s.IsWired.Txt,
+ "is_guest": s.IsGuest.Txt,
+ "use_fixedip": s.UseFixedIP.Txt,
+ "channel": s.Channel.Txt,
+ "vlan": s.Vlan.Txt,
+ }
+ fields := map[string]interface{}{
+ "anomalies": s.Anomalies,
+ "ip": s.IP,
+ "essid": s.Essid,
+ "bssid": s.Bssid,
+ "channel": s.Channel.Val,
+ "hostname": s.Name,
+ "radio_desc": s.RadioDescription,
+ "satisfaction": s.Satisfaction.Val,
+ "bytes_r": s.BytesR,
+ "ccq": s.Ccq,
+ "noise": s.Noise,
+ "note": s.Note,
+ "powersave_enabled": s.PowersaveEnabled,
+ "roam_count": s.RoamCount,
+ "rssi": s.Rssi,
+ "rx_bytes": s.RxBytes,
+ "rx_bytes_r": s.RxBytesR,
+ "rx_packets": s.RxPackets,
+ "rx_rate": s.RxRate,
+ "signal": s.Signal,
+ "tx_bytes": s.TxBytes,
+ "tx_bytes_r": s.TxBytesR,
+ "tx_packets": s.TxPackets,
+ "tx_retries": s.TxRetries,
+ "tx_power": s.TxPower,
+ "tx_rate": s.TxRate,
+ "uptime": s.Uptime,
+ "wifi_tx_attempts": s.WifiTxAttempts,
+ "wired-rx_bytes": s.WiredRxBytes,
+ "wired-rx_bytes-r": s.WiredRxBytesR,
+ "wired-rx_packets": s.WiredRxPackets,
+ "wired-tx_bytes": s.WiredTxBytes,
+ "wired-tx_bytes-r": s.WiredTxBytesR,
+ "wired-tx_packets": s.WiredTxPackets,
+ }
+
+ r.send(&metric{Table: "clients", Tags: tags, Fields: fields})
+}
+
+// totalsDPImap: controller, site, name (app/cat name), dpi.
+type totalsDPImap map[string]map[string]map[string]unifi.DPIData
+
+func (u *InfluxUnifi) batchClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
+ s, ok := v.(*unifi.DPITable)
+ if !ok {
+ u.LogErrorf("invalid type given to batchClientDPI: %T", v)
+ return
+ }
+
+ for _, dpi := range s.ByApp {
+ category := unifi.DPICats.Get(dpi.Cat)
+ application := unifi.DPIApps.GetApp(dpi.Cat, dpi.App)
+ fillDPIMapTotals(appTotal, application, s.SourceName, s.SiteName, dpi)
+ fillDPIMapTotals(catTotal, category, s.SourceName, s.SiteName, dpi)
+
+ r.send(&metric{
+ Table: "clientdpi",
+ Tags: map[string]string{
+ "category": category,
+ "application": application,
+ "name": s.Name,
+ "mac": s.MAC,
+ "site_name": s.SiteName,
+ "source": s.SourceName,
+ },
+ Fields: map[string]interface{}{
+ "tx_packets": dpi.TxPackets,
+ "rx_packets": dpi.RxPackets,
+ "tx_bytes": dpi.TxBytes,
+ "rx_bytes": dpi.RxBytes,
+ },
+ })
+ }
+}
+
+// fillDPIMapTotals fills in totals for categories and applications. maybe clients too.
+// This allows less processing in InfluxDB to produce total transfer data per cat or app.
+func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
+ if m[controller] == nil {
+ m[controller] = make(map[string]map[string]unifi.DPIData)
+ }
+
+ if m[controller][site] == nil {
+ m[controller][site] = make(map[string]unifi.DPIData)
+ }
+
+ existing := m[controller][site][name]
+ existing.TxPackets += dpi.TxPackets
+ existing.RxPackets += dpi.RxPackets
+ existing.TxBytes += dpi.TxBytes
+ existing.RxBytes += dpi.RxBytes
+ m[controller][site][name] = existing
+}
+
+func reportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
+ type all []struct {
+ kind string
+ val totalsDPImap
+ }
+
+ // This produces 7000+ metrics per site. Disabled for now.
+ if appTotal != nil {
+ appTotal = nil
+ }
+
+ // This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
+ a := all{
+ // This produces 7000+ metrics per site. Disabled for now.
+ {
+ kind: "application",
+ val: appTotal,
+ },
+ {
+ kind: "category",
+ val: catTotal,
+ },
+ }
+
+ for _, k := range a {
+ for controller, s := range k.val {
+ for site, c := range s {
+ for name, m := range c {
+ newMetric := &metric{
+ Table: "clientdpi",
+ Tags: map[string]string{
+ "category": "TOTAL",
+ "application": "TOTAL",
+ "name": "TOTAL",
+ "mac": "TOTAL",
+ "site_name": site,
+ "source": controller,
+ },
+ Fields: map[string]interface{}{
+ "tx_packets": m.TxPackets,
+ "rx_packets": m.RxPackets,
+ "tx_bytes": m.TxBytes,
+ "rx_bytes": m.RxBytes,
+ },
+ }
+ newMetric.Tags[k.kind] = name
+
+ r.send(newMetric)
+ }
+ }
+ }
+ }
+}
diff --git a/pkg/influxunifi/events.go b/pkg/influxunifi/events.go
new file mode 100644
index 00000000..624f5678
--- /dev/null
+++ b/pkg/influxunifi/events.go
@@ -0,0 +1,185 @@
+package influxunifi
+
+import (
+ "time"
+
+ "github.com/unpoller/unifi"
+)
+
+// These constants are used as names for printed/logged counters.
+const (
+ eventT = item("Event")
+ idsT = item("IDS")
+)
+
+// batchIDS generates intrusion detection datapoints for InfluxDB.
+func (u *InfluxUnifi) batchIDS(r report, i *unifi.IDS) { // nolint:dupl
+ if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
+ return // The event is older than our interval, ignore it.
+ }
+
+ fields := map[string]interface{}{
+ "dest_port": i.DestPort,
+ "src_port": i.SrcPort,
+ "dest_ip": i.DestIP,
+ "dst_mac": i.DstMAC,
+ "host": i.Host,
+ "msg": i.Msg,
+ "src_ip": i.SrcIP,
+ "src_mac": i.SrcMAC,
+ "dstip_asn": i.DestIPGeo.Asn,
+ "dstip_latitude": i.DestIPGeo.Latitude,
+ "dstip_longitude": i.DestIPGeo.Longitude,
+ "dstip_city": i.DestIPGeo.City,
+ "dstip_continent_code": i.DestIPGeo.ContinentCode,
+ "dstip_country_code": i.DestIPGeo.CountryCode,
+ "dstip_country_name": i.DestIPGeo.CountryName,
+ "dstip_organization": i.DestIPGeo.Organization,
+ "srcip_asn": i.SourceIPGeo.Asn,
+ "srcip_latitude": i.SourceIPGeo.Latitude,
+ "srcip_longitude": i.SourceIPGeo.Longitude,
+ "srcip_city": i.SourceIPGeo.City,
+ "srcip_continent_code": i.SourceIPGeo.ContinentCode,
+ "srcip_country_code": i.SourceIPGeo.CountryCode,
+ "srcip_country_name": i.SourceIPGeo.CountryName,
+ "srcip_organization": i.SourceIPGeo.Organization,
+ }
+
+ r.addCount(idsT)
+ r.send(&metric{
+ Table: "unifi_ids",
+ TS: i.Datetime,
+ Fields: cleanFields(fields),
+ Tags: cleanTags(map[string]string{
+ "site_name": i.SiteName,
+ "source": i.SourceName,
+ "in_iface": i.InIface,
+ "event_type": i.EventType,
+ "subsystem": i.Subsystem,
+ "archived": i.Archived.Txt,
+ "usgip": i.USGIP,
+ "proto": i.Proto,
+ "key": i.Key,
+ "catname": i.Catname,
+ "app_proto": i.AppProto,
+ "action": i.InnerAlertAction,
+ }),
+ })
+}
+
+// batchEvent generates events from UniFi for InfluxDB.
+func (u *InfluxUnifi) batchEvent(r report, i *unifi.Event) { // nolint: funlen
+ if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
+ return // The event is older than our interval, ignore it.
+ }
+
+ fields := map[string]interface{}{
+ "msg": i.Msg, // contains user[] or guest[] or admin[]
+ "duration": i.Duration.Val, // probably microseconds?
+ "guest": i.Guest, // mac address
+ "user": i.User, // mac address
+ "host": i.Host, // usg device?
+ "hostname": i.Hostname, // client name
+ "dest_port": i.DestPort,
+ "src_port": i.SrcPort,
+ "bytes": i.Bytes.Val,
+ "dest_ip": i.DestIP,
+ "dst_mac": i.DstMAC,
+ "ip": i.IP,
+ "src_ip": i.SrcIP,
+ "src_mac": i.SrcMAC,
+ "dstip_asn": i.DestIPGeo.Asn,
+ "dstip_latitude": i.DestIPGeo.Latitude,
+ "dstip_longitude": i.DestIPGeo.Longitude,
+ "dstip_city": i.DestIPGeo.City,
+ "dstip_continent_code": i.DestIPGeo.ContinentCode,
+ "dstip_country_code": i.DestIPGeo.CountryCode,
+ "dstip_country_name": i.DestIPGeo.CountryName,
+ "dstip_organization": i.DestIPGeo.Organization,
+ "srcip_asn": i.SourceIPGeo.Asn,
+ "srcip_latitude": i.SourceIPGeo.Latitude,
+ "srcip_longitude": i.SourceIPGeo.Longitude,
+ "srcip_city": i.SourceIPGeo.City,
+ "srcip_continent_code": i.SourceIPGeo.ContinentCode,
+ "srcip_country_code": i.SourceIPGeo.CountryCode,
+ "srcip_country_name": i.SourceIPGeo.CountryName,
+ "srcip_organization": i.SourceIPGeo.Organization,
+ }
+
+ r.addCount(eventT)
+ r.send(&metric{
+ TS: i.Datetime,
+ Table: "unifi_events",
+ Fields: cleanFields(fields),
+ Tags: cleanTags(map[string]string{
+ "admin": i.Admin, // username
+ "site_name": i.SiteName,
+ "source": i.SourceName,
+ "ap_from": i.ApFrom,
+ "ap_to": i.ApTo,
+ "ap": i.Ap,
+ "ap_name": i.ApName,
+ "gw": i.Gw,
+ "gw_name": i.GwName,
+ "sw": i.Sw,
+ "sw_name": i.SwName,
+ "catname": i.Catname,
+ "radio": i.Radio,
+ "radio_from": i.RadioFrom,
+ "radio_to": i.RadioTo,
+ "key": i.Key,
+ "in_iface": i.InIface,
+ "event_type": i.EventType,
+ "subsystem": i.Subsystem,
+ "ssid": i.SSID,
+ "is_admin": i.IsAdmin.Txt,
+ "channel": i.Channel.Txt,
+ "channel_from": i.ChannelFrom.Txt,
+ "channel_to": i.ChannelTo.Txt,
+ "usgip": i.USGIP,
+ "network": i.Network,
+ "app_proto": i.AppProto,
+ "proto": i.Proto,
+ "action": i.InnerAlertAction,
+ }),
+ })
+}
+
+// cleanTags removes any tag that is empty.
+func cleanTags(tags map[string]string) map[string]string {
+ for i := range tags {
+ if tags[i] == "" {
+ delete(tags, i)
+ }
+ }
+
+ return tags
+}
+
+// cleanFields removes any field with a default (or empty) value.
+func cleanFields(fields map[string]interface{}) map[string]interface{} { //nolint:cyclop
+ for s := range fields {
+ switch v := fields[s].(type) {
+ case nil:
+ delete(fields, s)
+ case int, int64, float64:
+ if v == 0 {
+ delete(fields, s)
+ }
+ case unifi.FlexBool:
+ if v.Txt == "" {
+ delete(fields, s)
+ }
+ case unifi.FlexInt:
+ if v.Txt == "" {
+ delete(fields, s)
+ }
+ case string:
+ if v == "" {
+ delete(fields, s)
+ }
+ }
+ }
+
+ return fields
+}
diff --git a/pkg/influxunifi/influxdb.go b/pkg/influxunifi/influxdb.go
new file mode 100644
index 00000000..7f31c378
--- /dev/null
+++ b/pkg/influxunifi/influxdb.go
@@ -0,0 +1,298 @@
+// Package influxunifi provides the methods to turn UniFi measurements into influx
+// data-points with appropriate tags and fields.
+package influxunifi
+
+import (
+ "crypto/tls"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ influx "github.com/influxdata/influxdb1-client/v2"
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+ "github.com/unpoller/unpoller/pkg/webserver"
+ "golift.io/cnfg"
+)
+
+// PluginName is the name of this plugin.
+const PluginName = "influxdb"
+
+const (
+ defaultInterval = 30 * time.Second
+ minimumInterval = 10 * time.Second
+ defaultInfluxDB = "unifi"
+ defaultInfluxUser = "unifipoller"
+ defaultInfluxURL = "http://127.0.0.1:8086"
+)
+
+// Config defines the data needed to store metrics in InfluxDB.
+type Config struct {
+ Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
+ URL string `json:"url,omitempty" toml:"url,omitempty" xml:"url" yaml:"url"`
+ User string `json:"user,omitempty" toml:"user,omitempty" xml:"user" yaml:"user"`
+ Pass string `json:"pass,omitempty" toml:"pass,omitempty" xml:"pass" yaml:"pass"`
+ DB string `json:"db,omitempty" toml:"db,omitempty" xml:"db" yaml:"db"`
+ Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
+ VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
+ // Save data for dead ports? ie. ports that are down or disabled.
+ DeadPorts bool `json:"dead_ports" toml:"dead_ports" xml:"dead_ports" yaml:"dead_ports"`
+}
+
+// InfluxDB allows the data to be nested in the config file.
+type InfluxDB struct {
+ *Config `json:"influxdb" toml:"influxdb" xml:"influxdb" yaml:"influxdb"`
+}
+
+// InfluxUnifi is returned by New() after you provide a Config.
+type InfluxUnifi struct {
+ Collector poller.Collect
+ influx influx.Client
+ LastCheck time.Time
+ *InfluxDB
+}
+
+type metric struct {
+ Table string
+ Tags map[string]string
+ Fields map[string]interface{}
+ TS time.Time
+}
+
+func init() { // nolint: gochecknoinits
+ u := &InfluxUnifi{InfluxDB: &InfluxDB{}, LastCheck: time.Now()}
+
+ poller.NewOutput(&poller.Output{
+ Name: PluginName,
+ Config: u.InfluxDB,
+ Method: u.Run,
+ })
+}
+
+// PollController runs forever, polling UniFi and pushing to InfluxDB
+// This is started by Run() or RunBoth() after everything checks out.
+func (u *InfluxUnifi) PollController() {
+ interval := u.Interval.Round(time.Second)
+ ticker := time.NewTicker(interval)
+ log.Printf("[INFO] Poller->InfluxDB started, interval: %v, dp: %v, db: %s, url: %s",
+ interval, u.DeadPorts, u.DB, u.URL)
+
+ for u.LastCheck = range ticker.C {
+ metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
+ if err != nil {
+ u.LogErrorf("metric fetch for InfluxDB failed: %v", err)
+ continue
+ }
+
+ events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
+ if err != nil {
+ u.LogErrorf("event fetch for InfluxDB failed: %v", err)
+ continue
+ }
+
+ report, err := u.ReportMetrics(metrics, events)
+ if err != nil {
+ // XXX: reset and re-auth? not sure..
+ u.LogErrorf("%v", err)
+ continue
+ }
+
+ u.Logf("UniFi Metrics Recorded. %v", report)
+ }
+}
+
+// Run runs a ticker to poll the unifi server and update influxdb.
+func (u *InfluxUnifi) Run(c poller.Collect) error {
+ var err error
+
+ if u.Collector = c; u.Config == nil || u.Disable {
+ u.Logf("InfluxDB config missing (or disabled), InfluxDB output disabled!")
+ return nil
+ }
+
+ u.setConfigDefaults()
+
+ u.influx, err = influx.NewHTTPClient(influx.HTTPConfig{
+ Addr: u.URL,
+ Username: u.User,
+ Password: u.Pass,
+ TLSConfig: &tls.Config{InsecureSkipVerify: !u.VerifySSL}, // nolint: gosec
+ })
+ if err != nil {
+ return fmt.Errorf("making client: %w", err)
+ }
+
+ fake := *u.Config
+ fake.Pass = strconv.FormatBool(fake.Pass != "")
+
+ webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: fake})
+ u.PollController()
+
+ return nil
+}
+
+func (u *InfluxUnifi) setConfigDefaults() {
+ if u.URL == "" {
+ u.URL = defaultInfluxURL
+ }
+
+ if u.User == "" {
+ u.User = defaultInfluxUser
+ }
+
+ if strings.HasPrefix(u.Pass, "file://") {
+ u.Pass = u.getPassFromFile(strings.TrimPrefix(u.Pass, "file://"))
+ }
+
+ if u.Pass == "" {
+ u.Pass = defaultInfluxUser
+ }
+
+ if u.DB == "" {
+ u.DB = defaultInfluxDB
+ }
+
+ if u.Interval.Duration == 0 {
+ u.Interval = cnfg.Duration{Duration: defaultInterval}
+ } else if u.Interval.Duration < minimumInterval {
+ u.Interval = cnfg.Duration{Duration: minimumInterval}
+ }
+
+ u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
+}
+
+func (u *InfluxUnifi) getPassFromFile(filename string) string {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ u.LogErrorf("Reading InfluxDB Password File: %v", err)
+ }
+
+ return strings.TrimSpace(string(b))
+}
+
+// ReportMetrics batches all device and client data into influxdb data points.
+// Call this after you've collected all the data you care about.
+// Returns an error if influxdb calls fail, otherwise returns a report.
+func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
+ r := &Report{
+ Metrics: m,
+ Events: e,
+ ch: make(chan *metric),
+ Start: time.Now(),
+ Counts: &Counts{Val: make(map[item]int)},
+ }
+ defer close(r.ch)
+
+ var err error
+
+ // Make a new Influx Points Batcher.
+ r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.DB})
+
+ if err != nil {
+ return nil, fmt.Errorf("influx.NewBatchPoint: %w", err)
+ }
+
+ go u.collect(r, r.ch)
+ // Batch all the points.
+ u.loopPoints(r)
+ r.wg.Wait() // wait for all points to finish batching!
+
+ // Send all the points.
+ if err = u.influx.Write(r.bp); err != nil {
+ return nil, fmt.Errorf("influxdb.Write(points): %w", err)
+ }
+
+ r.Elapsed = time.Since(r.Start)
+
+ return r, nil
+}
+
+// collect runs in a go routine and batches all the points.
+func (u *InfluxUnifi) collect(r report, ch chan *metric) {
+ for m := range ch {
+ if m.TS.IsZero() {
+ m.TS = r.metrics().TS
+ }
+
+ pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, m.TS)
+ if err == nil {
+ r.batch(m, pt)
+ }
+
+ r.error(err)
+ r.done()
+ }
+}
+
+// loopPoints walks all collected metrics and events synchronously and
+// sends each one to the collect routine through the metric channel.
+func (u *InfluxUnifi) loopPoints(r report) {
+ m := r.metrics()
+
+ for _, s := range m.RogueAPs {
+ u.switchExport(r, s)
+ }
+
+ for _, s := range m.Sites {
+ u.switchExport(r, s)
+ }
+
+ for _, s := range m.SitesDPI {
+ u.batchSiteDPI(r, s)
+ }
+
+ for _, s := range m.Clients {
+ u.switchExport(r, s)
+ }
+
+ for _, s := range m.Devices {
+ u.switchExport(r, s)
+ }
+
+ for _, s := range r.events().Logs {
+ u.switchExport(r, s)
+ }
+
+ appTotal := make(totalsDPImap)
+ catTotal := make(totalsDPImap)
+
+ for _, s := range m.ClientsDPI {
+ u.batchClientDPI(r, s, appTotal, catTotal)
+ }
+
+ reportClientDPItotals(r, appTotal, catTotal)
+}
+
+func (u *InfluxUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
+ switch v := v.(type) {
+ case *unifi.RogueAP:
+ u.batchRogueAP(r, v)
+ case *unifi.UAP:
+ u.batchUAP(r, v)
+ case *unifi.USW:
+ u.batchUSW(r, v)
+ case *unifi.USG:
+ u.batchUSG(r, v)
+ case *unifi.UXG:
+ u.batchUXG(r, v)
+ case *unifi.UDM:
+ u.batchUDM(r, v)
+ case *unifi.Site:
+ u.batchSite(r, v)
+ case *unifi.Client:
+ u.batchClient(r, v)
+ case *unifi.Event:
+ u.batchEvent(r, v)
+ case *unifi.IDS:
+ u.batchIDS(r, v)
+ case *unifi.Alarm:
+ u.batchAlarms(r, v)
+ case *unifi.Anomaly:
+ u.batchAnomaly(r, v)
+ default:
+ u.LogErrorf("invalid export type: %T", v)
+ }
+}
diff --git a/pkg/influxunifi/logger.go b/pkg/influxunifi/logger.go
new file mode 100644
index 00000000..32b9fc89
--- /dev/null
+++ b/pkg/influxunifi/logger.go
@@ -0,0 +1,38 @@
+package influxunifi
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/unpoller/unpoller/pkg/webserver"
+)
+
+// Logf logs a message.
+func (u *InfluxUnifi) Logf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "info"},
+ })
+ u.Collector.Logf(msg, v...)
+}
+
+// LogErrorf logs an error message.
+func (u *InfluxUnifi) LogErrorf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "error"},
+ })
+ u.Collector.LogErrorf(msg, v...)
+}
+
+// LogDebugf logs a debug message.
+func (u *InfluxUnifi) LogDebugf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "debug"},
+ })
+ u.Collector.LogDebugf(msg, v...)
+}
diff --git a/pkg/influxunifi/report.go b/pkg/influxunifi/report.go
new file mode 100644
index 00000000..d309c482
--- /dev/null
+++ b/pkg/influxunifi/report.go
@@ -0,0 +1,113 @@
+package influxunifi
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ influx "github.com/influxdata/influxdb1-client/v2"
+ "github.com/unpoller/unpoller/pkg/poller"
+)
+
+// Report is returned to the calling procedure after everything is processed.
+type Report struct {
+ Metrics *poller.Metrics
+ Events *poller.Events
+ Errors []error
+ Counts *Counts
+ Start time.Time
+ Elapsed time.Duration
+ ch chan *metric
+ wg sync.WaitGroup
+ bp influx.BatchPoints
+}
+
+// Counts holds counters and has a lock to deal with routines.
+type Counts struct {
+ Val map[item]int
+ sync.RWMutex
+}
+
+// report is an internal interface that can be mocked and overridden for tests.
+type report interface {
+ add()
+ done()
+ send(m *metric)
+ error(err error)
+ batch(m *metric, pt *influx.Point)
+ metrics() *poller.Metrics
+ events() *poller.Events
+ addCount(item, ...int)
+}
+
+func (r *Report) metrics() *poller.Metrics {
+ return r.Metrics
+}
+
+func (r *Report) events() *poller.Events {
+ return r.Events
+}
+
+func (r *Report) add() {
+ r.wg.Add(1)
+}
+
+func (r *Report) done() {
+ r.wg.Done()
+}
+
+func (r *Report) send(m *metric) {
+ r.wg.Add(1)
+ r.ch <- m
+}
+
+/* addCount locks its counter map; error() is not safe for concurrent use. */
+
+type item string
+
+func (r *Report) addCount(name item, counts ...int) {
+ r.Counts.Lock()
+ defer r.Counts.Unlock()
+
+ if len(counts) == 0 {
+ r.Counts.Val[name]++
+ }
+
+ for _, c := range counts {
+ r.Counts.Val[name] += c
+ }
+}
+
+func (r *Report) error(err error) {
+ if err != nil {
+ r.Errors = append(r.Errors, err)
+ }
+}
+
+// These constants are used as names for printed/logged counters.
+const (
+ pointT = item("Point")
+ fieldT = item("Fields")
+)
+
+func (r *Report) batch(m *metric, p *influx.Point) {
+ r.addCount(pointT)
+ r.addCount(fieldT, len(m.Fields))
+ r.bp.AddPoint(p)
+}
+
+func (r *Report) String() string {
+ r.Counts.RLock()
+ defer r.Counts.RUnlock()
+
+ m, c := r.Metrics, r.Counts.Val
+
+ return fmt.Sprintf("Site: %d, Client: %d, "+
+ "Gateways: %d, %s: %d, %s: %d, %s/%s/%s/%s: %d/%d/%d/%d, "+
+ "DPI Site/Client: %d/%d, %s: %d, %s: %d, Err: %d, Dur: %v",
+ len(m.Sites), len(m.Clients),
+ c[udmT]+c[usgT]+c[uxgT], uapT, c[uapT], uswT, c[uswT],
+ idsT, eventT, alarmT, anomalyT, c[idsT], c[eventT], c[alarmT], c[anomalyT],
+ len(m.SitesDPI), len(m.ClientsDPI), pointT, c[pointT], fieldT, c[fieldT],
+ len(r.Errors), r.Elapsed.Round(time.Millisecond))
+}
diff --git a/pkg/influxunifi/site.go b/pkg/influxunifi/site.go
new file mode 100644
index 00000000..9ad2263b
--- /dev/null
+++ b/pkg/influxunifi/site.go
@@ -0,0 +1,84 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// batchSite generates Unifi Sites' datapoints for InfluxDB.
+// These points can be passed directly to influx.
+func (u *InfluxUnifi) batchSite(r report, s *unifi.Site) {
+ for _, h := range s.Health {
+ tags := map[string]string{
+ "name": s.Name,
+ "site_name": s.SiteName,
+ "source": s.SourceName,
+ "desc": s.Desc,
+ "status": h.Status,
+ "subsystem": h.Subsystem,
+ "wan_ip": h.WanIP,
+ "gw_name": h.GwName,
+ "lan_ip": h.LanIP,
+ }
+ fields := map[string]interface{}{
+ "num_user": h.NumUser.Val,
+ "num_guest": h.NumGuest.Val,
+ "num_iot": h.NumIot.Val,
+ "tx_bytes-r": h.TxBytesR.Val,
+ "rx_bytes-r": h.RxBytesR.Val,
+ "num_ap": h.NumAp.Val,
+ "num_adopted": h.NumAdopted.Val,
+ "num_disabled": h.NumDisabled.Val,
+ "num_disconnected": h.NumDisconnected.Val,
+ "num_pending": h.NumPending.Val,
+ "num_gw": h.NumGw.Val,
+ "wan_ip": h.WanIP,
+ "num_sta": h.NumSta.Val,
+ "gw_cpu": h.GwSystemStats.CPU.Val,
+ "gw_mem": h.GwSystemStats.Mem.Val,
+ "gw_uptime": h.GwSystemStats.Uptime.Val,
+ "latency": h.Latency.Val,
+ "uptime": h.Uptime.Val,
+ "drops": h.Drops.Val,
+ "xput_up": h.XputUp.Val,
+ "xput_down": h.XputDown.Val,
+ "speedtest_ping": h.SpeedtestPing.Val,
+ "speedtest_lastrun": h.SpeedtestLastrun.Val,
+ "num_sw": h.NumSw.Val,
+ "remote_user_num_active": h.RemoteUserNumActive.Val,
+ "remote_user_num_inactive": h.RemoteUserNumInactive.Val,
+ "remote_user_rx_bytes": h.RemoteUserRxBytes.Val,
+ "remote_user_tx_bytes": h.RemoteUserTxBytes.Val,
+ "remote_user_rx_packets": h.RemoteUserRxPackets.Val,
+ "remote_user_tx_packets": h.RemoteUserTxPackets.Val,
+ "num_new_alarms": s.NumNewAlarms.Val,
+ }
+
+ r.send(&metric{Table: "subsystems", Tags: tags, Fields: fields})
+ }
+}
+
+func (u *InfluxUnifi) batchSiteDPI(r report, v interface{}) {
+ s, ok := v.(*unifi.DPITable)
+ if !ok {
+ u.LogErrorf("invalid type given to batchSiteDPI: %T", v)
+ return
+ }
+
+ for _, dpi := range s.ByApp {
+ r.send(&metric{
+ Table: "sitedpi",
+ Tags: map[string]string{
+ "category": unifi.DPICats.Get(dpi.Cat),
+ "application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
+ "site_name": s.SiteName,
+ "source": s.SourceName,
+ },
+ Fields: map[string]interface{}{
+ "tx_packets": dpi.TxPackets,
+ "rx_packets": dpi.RxPackets,
+ "tx_bytes": dpi.TxBytes,
+ "rx_bytes": dpi.RxBytes,
+ },
+ })
+ }
+}
diff --git a/pkg/influxunifi/uap.go b/pkg/influxunifi/uap.go
new file mode 100644
index 00000000..4bc936ba
--- /dev/null
+++ b/pkg/influxunifi/uap.go
@@ -0,0 +1,227 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uapT is used as a name for printed/logged counters.
+const uapT = item("UAP")
+
+// batchRogueAP generates metric points for neighboring access points.
+func (u *InfluxUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
+ if s.Age.Val == 0 {
+ return // only keep metrics for things that are recent.
+ }
+
+ r.send(&metric{
+ Table: "uap_rogue",
+ Tags: map[string]string{
+ "security": s.Security,
+ "oui": s.Oui,
+ "band": s.Band,
+ "mac": s.Bssid,
+ "ap_mac": s.ApMac,
+ "radio": s.Radio,
+ "radio_name": s.RadioName,
+ "site_name": s.SiteName,
+ "name": s.Essid,
+ "source": s.SourceName,
+ },
+ Fields: map[string]interface{}{
+ "age": s.Age.Val,
+ "bw": s.Bw.Val,
+ "center_freq": s.CenterFreq.Val,
+ "channel": s.Channel,
+ "freq": s.Freq.Val,
+ "noise": s.Noise.Val,
+ "rssi": s.Rssi.Val,
+ "rssi_age": s.RssiAge.Val,
+ "signal": s.Signal.Val,
+ },
+ })
+}
+
+// batchUAP generates Wireless-Access-Point datapoints for InfluxDB.
+// These points can be passed directly to influx.
+func (u *InfluxUnifi) batchUAP(r report, s *unifi.UAP) {
+ if !s.Adopted.Val || s.Locating.Val {
+ return
+ }
+
+ tags := map[string]string{
+ "mac": s.Mac,
+ "site_name": s.SiteName,
+ "source": s.SourceName,
+ "name": s.Name,
+ "version": s.Version,
+ "model": s.Model,
+ "serial": s.Serial,
+ "type": s.Type,
+ }
+ fields := Combine(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
+ fields["ip"] = s.IP
+ fields["bytes"] = s.Bytes.Val
+ fields["last_seen"] = s.LastSeen.Val
+ fields["rx_bytes"] = s.RxBytes.Val
+ fields["tx_bytes"] = s.TxBytes.Val
+ fields["uptime"] = s.Uptime.Val
+ fields["user-num_sta"] = int(s.UserNumSta.Val)
+ fields["guest-num_sta"] = int(s.GuestNumSta.Val)
+ fields["num_sta"] = s.NumSta.Val
+
+ r.addCount(uapT)
+ r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
+ u.processRadTable(r, tags, s.RadioTable, s.RadioTableStats)
+ u.processVAPTable(r, tags, s.VapTable)
+ u.batchPortTable(r, tags, s.PortTable)
+}
+
+func (u *InfluxUnifi) processUAPstats(ap *unifi.Ap) map[string]interface{} {
+ if ap == nil {
+ return map[string]interface{}{}
+ }
+
+ // Accumulative Statistics.
+ return map[string]interface{}{
+ "stat_user-rx_packets": ap.UserRxPackets.Val,
+ "stat_guest-rx_packets": ap.GuestRxPackets.Val,
+ "stat_rx_packets": ap.RxPackets.Val,
+ "stat_user-rx_bytes": ap.UserRxBytes.Val,
+ "stat_guest-rx_bytes": ap.GuestRxBytes.Val,
+ "stat_rx_bytes": ap.RxBytes.Val,
+ "stat_user-rx_errors": ap.UserRxErrors.Val,
+ "stat_guest-rx_errors": ap.GuestRxErrors.Val,
+ "stat_rx_errors": ap.RxErrors.Val,
+ "stat_user-rx_dropped": ap.UserRxDropped.Val,
+ "stat_guest-rx_dropped": ap.GuestRxDropped.Val,
+ "stat_rx_dropped": ap.RxDropped.Val,
+ "stat_user-rx_crypts": ap.UserRxCrypts.Val,
+ "stat_guest-rx_crypts": ap.GuestRxCrypts.Val,
+ "stat_rx_crypts": ap.RxCrypts.Val,
+ "stat_user-rx_frags": ap.UserRxFrags.Val,
+ "stat_guest-rx_frags": ap.GuestRxFrags.Val,
+ "stat_rx_frags": ap.RxFrags.Val,
+ "stat_user-tx_packets": ap.UserTxPackets.Val,
+ "stat_guest-tx_packets": ap.GuestTxPackets.Val,
+ "stat_tx_packets": ap.TxPackets.Val,
+ "stat_user-tx_bytes": ap.UserTxBytes.Val,
+ "stat_guest-tx_bytes": ap.GuestTxBytes.Val,
+ "stat_tx_bytes": ap.TxBytes.Val,
+ "stat_user-tx_errors": ap.UserTxErrors.Val,
+ "stat_guest-tx_errors": ap.GuestTxErrors.Val,
+ "stat_tx_errors": ap.TxErrors.Val,
+ "stat_user-tx_dropped": ap.UserTxDropped.Val,
+ "stat_guest-tx_dropped": ap.GuestTxDropped.Val,
+ "stat_tx_dropped": ap.TxDropped.Val,
+ "stat_user-tx_retries": ap.UserTxRetries.Val,
+ "stat_guest-tx_retries": ap.GuestTxRetries.Val,
+ }
+}
+
+// processVAPTable creates points for Wifi Radios. This works with several types of UAP-capable devices.
+func (u *InfluxUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) { // nolint: funlen
+ for _, s := range vt {
+ tags := map[string]string{
+ "device_name": t["name"],
+ "site_name": t["site_name"],
+ "source": t["source"],
+ "ap_mac": s.ApMac,
+ "bssid": s.Bssid,
+ "id": s.ID,
+ "name": s.Name,
+ "radio_name": s.RadioName,
+ "radio": s.Radio,
+ "essid": s.Essid,
+ "site_id": s.SiteID,
+ "usage": s.Usage,
+ "state": s.State,
+ "is_guest": s.IsGuest.Txt,
+ }
+ fields := map[string]interface{}{
+ "ccq": s.Ccq,
+ "mac_filter_rejections": s.MacFilterRejections,
+ "num_satisfaction_sta": s.NumSatisfactionSta.Val,
+ "avg_client_signal": s.AvgClientSignal.Val,
+ "satisfaction": s.Satisfaction.Val,
+ "satisfaction_now": s.SatisfactionNow.Val,
+ "num_sta": s.NumSta,
+ "channel": s.Channel.Val,
+ "rx_bytes": s.RxBytes.Val,
+ "rx_crypts": s.RxCrypts.Val,
+ "rx_dropped": s.RxDropped.Val,
+ "rx_errors": s.RxErrors.Val,
+ "rx_frags": s.RxFrags.Val,
+ "rx_nwids": s.RxNwids.Val,
+ "rx_packets": s.RxPackets.Val,
+ "tx_bytes": s.TxBytes.Val,
+ "tx_dropped": s.TxDropped.Val,
+ "tx_errors": s.TxErrors.Val,
+ "tx_packets": s.TxPackets.Val,
+ "tx_power": s.TxPower.Val,
+ "tx_retries": s.TxRetries.Val,
+ "tx_combined_retries": s.TxCombinedRetries.Val,
+ "tx_data_mpdu_bytes": s.TxDataMpduBytes.Val,
+ "tx_rts_retries": s.TxRtsRetries.Val,
+ "tx_success": s.TxSuccess.Val,
+ "tx_total": s.TxTotal.Val,
+ "tx_tcp_goodbytes": s.TxTCPStats.Goodbytes.Val,
+ "tx_tcp_lat_avg": s.TxTCPStats.LatAvg.Val,
+ "tx_tcp_lat_max": s.TxTCPStats.LatMax.Val,
+ "tx_tcp_lat_min": s.TxTCPStats.LatMin.Val,
+ "rx_tcp_goodbytes": s.RxTCPStats.Goodbytes.Val,
+ "rx_tcp_lat_avg": s.RxTCPStats.LatAvg.Val,
+ "rx_tcp_lat_max": s.RxTCPStats.LatMax.Val,
+ "rx_tcp_lat_min": s.RxTCPStats.LatMin.Val,
+ "wifi_tx_latency_mov_avg": s.WifiTxLatencyMov.Avg.Val,
+ "wifi_tx_latency_mov_max": s.WifiTxLatencyMov.Max.Val,
+ "wifi_tx_latency_mov_min": s.WifiTxLatencyMov.Min.Val,
+ "wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
+ "wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
+ }
+
+ r.send(&metric{Table: "uap_vaps", Tags: tags, Fields: fields})
+ }
+}
+
+func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
+ for _, p := range rt {
+ tags := map[string]string{
+ "device_name": t["name"],
+ "site_name": t["site_name"],
+ "source": t["source"],
+ "channel": p.Channel.Txt,
+ "radio": p.Radio,
+ }
+ fields := map[string]interface{}{
+ "current_antenna_gain": p.CurrentAntennaGain.Val,
+ "ht": p.Ht.Txt,
+ "max_txpower": p.MaxTxpower.Val,
+ "min_txpower": p.MinTxpower.Val,
+ "nss": p.Nss.Val,
+ "radio_caps": p.RadioCaps.Val,
+ }
+
+ for _, t := range rts {
+ if t.Name == p.Name {
+ fields["ast_be_xmit"] = t.AstBeXmit.Val
+ fields["channel"] = t.Channel.Val
+ fields["cu_self_rx"] = t.CuSelfRx.Val
+ fields["cu_self_tx"] = t.CuSelfTx.Val
+ fields["cu_total"] = t.CuTotal.Val
+ fields["extchannel"] = t.Extchannel.Val
+ fields["gain"] = t.Gain.Val
+ fields["guest-num_sta"] = t.GuestNumSta.Val
+ fields["num_sta"] = t.NumSta.Val
+ fields["radio"] = t.Radio
+ fields["tx_packets"] = t.TxPackets.Val
+ fields["tx_power"] = t.TxPower.Val
+ fields["tx_retries"] = t.TxRetries.Val
+ fields["user-num_sta"] = t.UserNumSta.Val
+
+ break
+ }
+ }
+
+ r.send(&metric{Table: "uap_radios", Tags: tags, Fields: fields})
+ }
+}
diff --git a/pkg/influxunifi/udm.go b/pkg/influxunifi/udm.go
new file mode 100644
index 00000000..d34642d5
--- /dev/null
+++ b/pkg/influxunifi/udm.go
@@ -0,0 +1,179 @@
+package influxunifi
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/unpoller/unifi"
+)
+
+// udmT is used as a name for printed/logged counters.
+const udmT = item("UDM")
+
+// Combine merges N maps into one new map. When the same key appears in more
+// than one input map, the value from the later map wins — so ordering matters,
+// and careless use can silently overwrite earlier values.
+func Combine(in ...map[string]interface{}) map[string]interface{} {
+	out := make(map[string]interface{})
+
+	for i := range in {
+		for k := range in[i] {
+			out[k] = in[i][k]
+		}
+	}
+
+	return out
+}
+
+// batchSysStats is used by all device types. It converts the common system
+// metrics (load average, memory, CPU, uptime) into InfluxDB fields, and adds
+// one "temp_<name>" field per parsed temperature sensor.
+func (u *InfluxUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]interface{} {
+	m := map[string]interface{}{
+		"loadavg_1":     s.Loadavg1.Val,
+		"loadavg_5":     s.Loadavg5.Val,
+		"loadavg_15":    s.Loadavg15.Val,
+		"mem_used":      s.MemUsed.Val,
+		"mem_buffer":    s.MemBuffer.Val,
+		"mem_total":     s.MemTotal.Val,
+		"cpu":           ss.CPU.Val,
+		"mem":           ss.Mem.Val,
+		"system_uptime": ss.Uptime.Val,
+	}
+
+	for k, v := range ss.Temps {
+		// Values look like "NN <unit>"; take the leading integer and ignore
+		// the parse error (temp stays 0 and the sensor is skipped below).
+		temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
+		// Sanitize the sensor name: spaces become underscores, parens dropped.
+		k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
+
+		if temp != 0 && k != "" {
+			m["temp_"+strings.ToLower(k)] = temp
+		}
+	}
+
+	return m
+}
+
+// batchUDMtemps converts a device's temperature sensor list into InfluxDB
+// fields, one "temp_<name>" entry per sensor.
+func (u *InfluxUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]interface{} {
+	output := make(map[string]interface{})
+
+	for _, t := range temps {
+		output["temp_"+t.Name] = t.Value
+	}
+
+	return output
+}
+
+// batchUDMstorage converts a device's storage list into InfluxDB fields:
+// size, used, and a derived percent-used value per storage device.
+func (u *InfluxUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]interface{} {
+	output := make(map[string]interface{})
+
+	for _, t := range storage {
+		output["storage_"+t.Name+"_size"] = t.Size.Val
+		output["storage_"+t.Name+"_used"] = t.Used.Val
+
+		// Guard divide-by-zero and nonsense (used > size) readings. Using <=
+		// instead of < lets a completely full device report 100 rather than
+		// falling through to 0; a zero used value still yields 0 here.
+		if t.Size.Val != 0 && t.Used.Val <= t.Size.Val {
+			output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
+		} else {
+			output["storage_"+t.Name+"_pct"] = 0
+		}
+	}
+
+	return output
+}
+
+// batchUDM generates Unifi Gateway datapoints for InfluxDB.
+// These points can be passed directly to influx.
+// A UDM bundles a gateway, a switch, and (on non-pro models) an access point,
+// so this emits up to three points: "usg", "usw", and "uap".
+func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
+	if !s.Adopted.Val || s.Locating.Val {
+		return // Skip unadopted devices and devices in locate mode.
+	}
+
+	// Gateway ("usg") point.
+	tags := map[string]string{
+		"source":    s.SourceName,
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields := Combine(
+		u.batchUDMstorage(s.Storage),
+		u.batchUDMtemps(s.Temperatures),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]interface{}{
+			"source":        s.SourceName,
+			"ip":            s.IP,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"license_state": s.LicenseState,
+			"guest-num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user-num_sta":  s.UserNumSta.Val,
+			"version":       s.Version,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+		},
+	)
+
+	r.addCount(udmT)
+	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+
+	// Switch ("usw") point.
+	tags = map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields = Combine(
+		u.batchUSWstat(s.Stat.Sw),
+		map[string]interface{}{
+			"guest-num_sta": s.GuestNumSta.Val,
+			"ip":            s.IP,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+		})
+
+	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
+	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
+
+	if s.Stat.Ap == nil {
+		return // we're done now. the following code process UDM (non-pro) UAP data.
+	}
+
+	// Access point ("uap") point, non-pro UDM only.
+	tags = map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields = u.processUAPstats(s.Stat.Ap)
+	fields["ip"] = s.IP
+	fields["bytes"] = s.Bytes.Val
+	fields["last_seen"] = s.LastSeen.Val
+	fields["rx_bytes"] = s.RxBytes.Val
+	fields["tx_bytes"] = s.TxBytes.Val
+	fields["uptime"] = s.Uptime.Val
+	fields["state"] = s.State // NOTE(review): other tables store s.State.Val; this stores the raw value — confirm intended.
+	fields["user-num_sta"] = int(s.UserNumSta.Val)
+	fields["guest-num_sta"] = int(s.GuestNumSta.Val)
+	fields["num_sta"] = s.NumSta.Val
+
+	r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
+	u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
+	u.processVAPTable(r, tags, *s.VapTable)
+}
diff --git a/pkg/influxunifi/usg.go b/pkg/influxunifi/usg.go
new file mode 100644
index 00000000..92477042
--- /dev/null
+++ b/pkg/influxunifi/usg.go
@@ -0,0 +1,147 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// usgT is used as a name for printed/logged counters.
+const usgT = item("USG")
+
+// batchUSG generates Unifi Gateway datapoints for InfluxDB.
+// These points can be passed directly to influx. Emits one "usg" point plus
+// per-network and per-WAN-port points via batchNetTable and batchUSGwans.
+func (u *InfluxUnifi) batchUSG(r report, s *unifi.USG) {
+	if !s.Adopted.Val || s.Locating.Val {
+		return // Skip unadopted devices and devices in locate mode.
+	}
+
+	tags := map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields := Combine(
+		u.batchUDMtemps(s.Temperatures),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		map[string]interface{}{
+			"ip":            s.IP,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"license_state": s.LicenseState,
+			"guest-num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user-num_sta":  s.UserNumSta.Val,
+			"version":       s.Version,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+			// "speedtest_rundate": time.Unix(int64(s.SpeedtestStatus.Rundate.Val), 0).String(),
+		},
+	)
+
+	r.addCount(usgT)
+	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+}
+
+// batchUSGstats converts gateway uplink, speedtest, and LAN counters into
+// InfluxDB fields. Returns an empty map when no gateway stats are available.
+func (u *InfluxUnifi) batchUSGstats(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]interface{} {
+	if gw == nil {
+		return map[string]interface{}{}
+	}
+
+	return map[string]interface{}{
+		"uplink_latency":                 ul.Latency.Val,
+		"uplink_speed":                   ul.Speed.Val,
+		"speedtest-status_latency":       ss.Latency.Val,
+		"speedtest-status_runtime":       ss.Runtime.Val,
+		"speedtest-status_rundate":       ss.Rundate.Val,
+		"speedtest-status_ping":          ss.StatusPing.Val,
+		"speedtest-status_xput_download": ss.XputDownload.Val,
+		"speedtest-status_xput_upload":   ss.XputUpload.Val,
+		"lan-rx_bytes":                   gw.LanRxBytes.Val,
+		"lan-rx_packets":                 gw.LanRxPackets.Val,
+		"lan-tx_bytes":                   gw.LanTxBytes.Val,
+		"lan-tx_packets":                 gw.LanTxPackets.Val,
+		"lan-rx_dropped":                 gw.LanRxDropped.Val,
+	}
+}
+
+// batchUSGwans writes one "usg_wan_ports" point per WAN interface that is up.
+// Down interfaces are skipped entirely. The passed tags supply the parent
+// device's name/site/source values.
+func (u *InfluxUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
+	for _, wan := range wans {
+		if !wan.Up.Val {
+			continue // Only record WAN ports that are up.
+		}
+
+		// Shadows the outer tags on purpose: per-port tags derived from both
+		// the parent device and this WAN interface.
+		tags := map[string]string{
+			"device_name": tags["name"],
+			"site_name":   tags["site_name"],
+			"source":      tags["source"],
+			"ip":          wan.IP,
+			"purpose":     wan.Name,
+			"mac":         wan.Mac,
+			"ifname":      wan.Ifname,
+			"type":        wan.Type,
+			"up":          wan.Up.Txt,
+			"enabled":     wan.Enable.Txt,
+		}
+		fields := map[string]interface{}{
+			"bytes-r":      wan.BytesR.Val,
+			"full_duplex":  wan.FullDuplex.Val,
+			"gateway":      wan.Gateway,
+			"max_speed":    wan.MaxSpeed.Val,
+			"rx_bytes":     wan.RxBytes.Val,
+			"rx_bytes-r":   wan.RxBytesR.Val,
+			"rx_dropped":   wan.RxDropped.Val,
+			"rx_errors":    wan.RxErrors.Val,
+			"rx_broadcast": wan.RxBroadcast.Val,
+			"rx_multicast": wan.RxMulticast.Val,
+			"rx_packets":   wan.RxPackets.Val,
+			"speed":        wan.Speed.Val,
+			"tx_bytes":     wan.TxBytes.Val,
+			"tx_bytes-r":   wan.TxBytesR.Val,
+			"tx_dropped":   wan.TxDropped.Val,
+			"tx_errors":    wan.TxErrors.Val,
+			"tx_packets":   wan.TxPackets.Val,
+			"tx_broadcast": wan.TxBroadcast.Val,
+			"tx_multicast": wan.TxMulticast.Val,
+		}
+
+		r.send(&metric{Table: "usg_wan_ports", Tags: tags, Fields: fields})
+	}
+}
+
+// batchNetTable writes one "usg_networks" point per configured network (LAN,
+// VLAN, guest, etc.). The passed tags supply the parent device's identifiers.
+func (u *InfluxUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
+	for _, p := range nt {
+		// Shadows the outer tags on purpose: per-network tags.
+		tags := map[string]string{
+			"device_name": tags["name"],
+			"site_name":   tags["site_name"],
+			"source":      tags["source"],
+			"up":          p.Up.Txt,
+			"enabled":     p.Enabled.Txt,
+			"ip":          p.IP,
+			"mac":         p.Mac,
+			"name":        p.Name,
+			"domain_name": p.DomainName,
+			"purpose":     p.Purpose,
+			"is_guest":    p.IsGuest.Txt,
+		}
+		fields := map[string]interface{}{
+			"num_sta":    p.NumSta.Val,
+			"rx_bytes":   p.RxBytes.Val,
+			"rx_packets": p.RxPackets.Val,
+			"tx_bytes":   p.TxBytes.Val,
+			"tx_packets": p.TxPackets.Val,
+		}
+
+		r.send(&metric{Table: "usg_networks", Tags: tags, Fields: fields})
+	}
+}
diff --git a/pkg/influxunifi/usw.go b/pkg/influxunifi/usw.go
new file mode 100644
index 00000000..b785998a
--- /dev/null
+++ b/pkg/influxunifi/usw.go
@@ -0,0 +1,133 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uswT is used as a name for printed/logged counters.
+const uswT = item("USW")
+
+// batchUSW generates Unifi Switch datapoints for InfluxDB.
+// These points can be passed directly to influx. Emits one "usw" point plus
+// per-port points via batchPortTable.
+func (u *InfluxUnifi) batchUSW(r report, s *unifi.USW) {
+	if !s.Adopted.Val || s.Locating.Val {
+		return // Skip unadopted devices and devices in locate mode.
+	}
+
+	tags := map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields := Combine(
+		u.batchUSWstat(s.Stat.Sw),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]interface{}{
+			"guest-num_sta":       s.GuestNumSta.Val,
+			"ip":                  s.IP,
+			"bytes":               s.Bytes.Val,
+			"fan_level":           s.FanLevel.Val,
+			"general_temperature": s.GeneralTemperature.Val,
+			"last_seen":           s.LastSeen.Val,
+			"rx_bytes":            s.RxBytes.Val,
+			"tx_bytes":            s.TxBytes.Val,
+			"uptime":              s.Uptime.Val,
+			"state":               s.State.Val,
+			"user-num_sta":        s.UserNumSta.Val,
+		})
+
+	r.addCount(uswT)
+	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
+	u.batchPortTable(r, tags, s.PortTable)
+}
+
+// batchUSWstat converts aggregate switch counters into InfluxDB fields.
+// Returns an empty map when the controller provided no switch stats.
+func (u *InfluxUnifi) batchUSWstat(sw *unifi.Sw) map[string]interface{} {
+	if sw == nil {
+		return map[string]interface{}{}
+	}
+
+	return map[string]interface{}{
+		"stat_bytes":      sw.Bytes.Val,
+		"stat_rx_bytes":   sw.RxBytes.Val,
+		"stat_rx_crypts":  sw.RxCrypts.Val,
+		"stat_rx_dropped": sw.RxDropped.Val,
+		"stat_rx_errors":  sw.RxErrors.Val,
+		"stat_rx_frags":   sw.RxFrags.Val,
+		"stat_rx_packets": sw.RxPackets.Val, // was sw.TxPackets.Val: copy/paste bug reported tx counts as rx.
+		"stat_tx_bytes":   sw.TxBytes.Val,
+		"stat_tx_dropped": sw.TxDropped.Val,
+		"stat_tx_errors":  sw.TxErrors.Val,
+		"stat_tx_packets": sw.TxPackets.Val,
+		"stat_tx_retries": sw.TxRetries.Val,
+	}
+}
+
+// batchPortTable writes one "usw_ports" point per switch port. Down or
+// disabled ports are skipped unless u.DeadPorts is set. PoE and SFP fields
+// are added only when the port reports those capabilities active.
+//nolint:funlen
+func (u *InfluxUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
+	for _, p := range pt {
+		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
+			continue // only record UP ports.
+		}
+
+		tags := map[string]string{
+			"site_name":      t["site_name"],
+			"device_name":    t["name"],
+			"source":         t["source"],
+			"type":           t["type"],
+			"name":           p.Name,
+			"poe_mode":       p.PoeMode,
+			"port_poe":       p.PortPoe.Txt,
+			"port_idx":       p.PortIdx.Txt,
+			"port_id":        t["name"] + " Port " + p.PortIdx.Txt,
+			"poe_enable":     p.PoeEnable.Txt,
+			"flowctrl_rx":    p.FlowctrlRx.Txt,
+			"flowctrl_tx":    p.FlowctrlTx.Txt,
+			"media":          p.Media,
+			"has_sfp":        p.SFPFound.Txt,
+			"sfp_compliance": p.SFPCompliance,
+			"sfp_serial":     p.SFPSerial,
+			"sfp_vendor":     p.SFPVendor,
+			"sfp_part":       p.SFPPart,
+		}
+		fields := map[string]interface{}{
+			"dbytes_r":     p.BytesR.Val,
+			"rx_broadcast": p.RxBroadcast.Val,
+			"rx_bytes":     p.RxBytes.Val,
+			"rx_bytes-r":   p.RxBytesR.Val,
+			"rx_dropped":   p.RxDropped.Val,
+			"rx_errors":    p.RxErrors.Val,
+			"rx_multicast": p.RxMulticast.Val,
+			"rx_packets":   p.RxPackets.Val,
+			"speed":        p.Speed.Val,
+			"stp_pathcost": p.StpPathcost.Val,
+			"tx_broadcast": p.TxBroadcast.Val,
+			"tx_bytes":     p.TxBytes.Val,
+			"tx_bytes-r":   p.TxBytesR.Val,
+			"tx_dropped":   p.TxDropped.Val,
+			"tx_errors":    p.TxErrors.Val,
+			"tx_multicast": p.TxMulticast.Val,
+			"tx_packets":   p.TxPackets.Val,
+		}
+
+		// PoE telemetry only exists when PoE is enabled on a PoE-capable port.
+		if p.PoeEnable.Val && p.PortPoe.Val {
+			fields["poe_current"] = p.PoeCurrent.Val
+			fields["poe_power"] = p.PoePower.Val
+			fields["poe_voltage"] = p.PoeVoltage.Val
+		}
+
+		// SFP telemetry only exists when a module is detected.
+		if p.SFPFound.Val {
+			fields["sfp_current"] = p.SFPCurrent.Val
+			fields["sfp_voltage"] = p.SFPVoltage.Val
+			fields["sfp_temperature"] = p.SFPTemperature.Val
+			fields["sfp_txpower"] = p.SFPTxpower.Val
+			fields["sfp_rxpower"] = p.SFPRxpower.Val
+		}
+
+		r.send(&metric{Table: "usw_ports", Tags: tags, Fields: fields})
+	}
+}
diff --git a/pkg/influxunifi/uxg.go b/pkg/influxunifi/uxg.go
new file mode 100644
index 00000000..fd8c27e7
--- /dev/null
+++ b/pkg/influxunifi/uxg.go
@@ -0,0 +1,80 @@
+package influxunifi
+
+import (
+ "github.com/unpoller/unifi"
+)
+
+// uxgT is used as a name for printed/logged counters.
+const uxgT = item("UXG")
+
+// batchUXG generates 10Gb Unifi Gateway datapoints for InfluxDB.
+// These points can be passed directly to influx. Like a UDM, a UXG contains
+// both gateway and switch roles, so it emits "usg" and "usw" points.
+func (u *InfluxUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
+	if !s.Adopted.Val || s.Locating.Val {
+		return // Skip unadopted devices and devices in locate mode.
+	}
+
+	// Gateway ("usg") point.
+	tags := map[string]string{
+		"source":    s.SourceName,
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields := Combine(
+		u.batchUDMstorage(s.Storage),
+		u.batchUDMtemps(s.Temperatures),
+		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
+		u.batchSysStats(s.SysStats, s.SystemStats),
+		map[string]interface{}{
+			"source":        s.SourceName,
+			"ip":            s.IP,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"license_state": s.LicenseState,
+			"guest-num_sta": s.GuestNumSta.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+			"state":         s.State.Val,
+			"user-num_sta":  s.UserNumSta.Val,
+			"version":       s.Version,
+			"num_desktop":   s.NumDesktop.Val,
+			"num_handheld":  s.NumHandheld.Val,
+			"num_mobile":    s.NumMobile.Val,
+		},
+	)
+
+	r.addCount(uxgT)
+	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
+	u.batchNetTable(r, tags, s.NetworkTable)
+	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
+
+	// Switch ("usw") point.
+	tags = map[string]string{
+		"mac":       s.Mac,
+		"site_name": s.SiteName,
+		"source":    s.SourceName,
+		"name":      s.Name,
+		"version":   s.Version,
+		"model":     s.Model,
+		"serial":    s.Serial,
+		"type":      s.Type,
+	}
+	fields = Combine(
+		u.batchUSWstat(s.Stat.Sw),
+		map[string]interface{}{
+			"guest-num_sta": s.GuestNumSta.Val,
+			"ip":            s.IP,
+			"bytes":         s.Bytes.Val,
+			"last_seen":     s.LastSeen.Val,
+			"rx_bytes":      s.RxBytes.Val,
+			"tx_bytes":      s.TxBytes.Val,
+			"uptime":        s.Uptime.Val,
+		})
+
+	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
+	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
+}
diff --git a/pkg/inputunifi/LICENSE b/pkg/inputunifi/LICENSE
new file mode 100644
index 00000000..870eeecd
--- /dev/null
+++ b/pkg/inputunifi/LICENSE
@@ -0,0 +1,21 @@
+MIT LICENSE.
+Copyright (c) 2018-2021 David Newhall II
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/inputunifi/README.md b/pkg/inputunifi/README.md
new file mode 100644
index 00000000..10a6daae
--- /dev/null
+++ b/pkg/inputunifi/README.md
@@ -0,0 +1,3 @@
+# inputunifi
+
+## UnPoller Input Plugin
diff --git a/pkg/inputunifi/collectevents.go b/pkg/inputunifi/collectevents.go
new file mode 100644
index 00000000..17db05c7
--- /dev/null
+++ b/pkg/inputunifi/collectevents.go
@@ -0,0 +1,158 @@
+package inputunifi
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/webserver"
+)
+
+/* Event collection. Events are also sent to the webserver for display. */
+
+// collectControllerEvents gathers logged events (IDS, anomalies, alarms, and
+// events) from one controller's filtered sites, re-authenticating first if
+// the session has been lost. Returns the accumulated event list.
+func (u *InputUnifi) collectControllerEvents(c *Controller) ([]interface{}, error) {
+	if u.isNill(c) {
+		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
+
+		if err := u.getUnifi(c); err != nil {
+			return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
+		}
+	}
+
+	var (
+		logs    = []interface{}{}
+		newLogs []interface{}
+	)
+
+	// Get the sites we care about.
+	sites, err := u.getFilteredSites(c)
+	if err != nil {
+		return nil, fmt.Errorf("unifi.GetSites(): %w", err)
+	}
+
+	type caller func([]interface{}, []*unifi.Site, *Controller) ([]interface{}, error)
+
+	for _, call := range []caller{u.collectIDS, u.collectAnomalies, u.collectAlarms, u.collectEvents} {
+		if newLogs, err = call(logs, sites, c); err != nil {
+			return logs, err
+		}
+
+		// Each collector appends to the slice it receives and returns the
+		// result, so replace logs rather than appending newLogs to it;
+		// appending would duplicate every previously collected entry.
+		logs = newLogs
+	}
+
+	return logs, nil
+}
+
+// collectAlarms appends each site's alarms to logs and forwards every alarm
+// to the webserver plugin for display. Returns the (possibly grown) logs.
+func (u *InputUnifi) collectAlarms(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
+	// Guard the pointer (matches SaveRogue/SaveDPI handling elsewhere);
+	// an unset config value must not panic.
+	if c.SaveAlarms != nil && *c.SaveAlarms {
+		for _, s := range sites {
+			events, err := c.Unifi.GetAlarmsSite(s)
+			if err != nil {
+				return logs, fmt.Errorf("unifi.GetAlarms(): %w", err)
+			}
+
+			for _, e := range events {
+				logs = append(logs, e)
+
+				webserver.NewInputEvent(PluginName, s.ID+"_alarms", &webserver.Event{
+					Ts: e.Datetime, Msg: e.Msg, Tags: map[string]string{
+						"type": "alarm", "key": e.Key, "site_id": e.SiteID,
+						"site_name": e.SiteName, "source": e.SourceName,
+					},
+				})
+			}
+		}
+	}
+
+	return logs, nil
+}
+
+// collectAnomalies appends each site's anomalies to logs and forwards every
+// anomaly to the webserver plugin for display. Returns the grown logs.
+func (u *InputUnifi) collectAnomalies(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
+	// Guard the pointer; an unset config value must not panic.
+	if c.SaveAnomal != nil && *c.SaveAnomal {
+		for _, s := range sites {
+			events, err := c.Unifi.GetAnomaliesSite(s)
+			if err != nil {
+				return logs, fmt.Errorf("unifi.GetAnomalies(): %w", err)
+			}
+
+			for _, e := range events {
+				logs = append(logs, e)
+
+				webserver.NewInputEvent(PluginName, s.ID+"_anomalies", &webserver.Event{
+					Ts: e.Datetime, Msg: e.Anomaly, Tags: map[string]string{
+						"type": "anomaly", "site_name": e.SiteName, "source": e.SourceName,
+					},
+				})
+			}
+		}
+	}
+
+	return logs, nil
+}
+
+// collectEvents appends the last hour of each site's events to logs (with PII
+// redacted when configured) and forwards each to the webserver plugin.
+func (u *InputUnifi) collectEvents(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
+	// Guard the pointer; an unset config value must not panic.
+	if c.SaveEvents != nil && *c.SaveEvents {
+		for _, s := range sites {
+			events, err := c.Unifi.GetSiteEvents(s, time.Hour)
+			if err != nil {
+				return logs, fmt.Errorf("unifi.GetEvents(): %w", err)
+			}
+
+			for _, e := range events {
+				e := redactEvent(e, c.HashPII)
+				logs = append(logs, e)
+
+				webserver.NewInputEvent(PluginName, s.ID+"_events", &webserver.Event{
+					Msg: e.Msg, Ts: e.Datetime, Tags: map[string]string{
+						"type": "event", "key": e.Key, "site_id": e.SiteID,
+						"site_name": e.SiteName, "source": e.SourceName,
+					},
+				})
+			}
+		}
+	}
+
+	return logs, nil
+}
+
+// collectIDS appends each site's IDS (intrusion detection) entries to logs
+// and forwards each to the webserver plugin for display.
+func (u *InputUnifi) collectIDS(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
+	// Guard the pointer; an unset config value must not panic.
+	if c.SaveIDS != nil && *c.SaveIDS {
+		for _, s := range sites {
+			events, err := c.Unifi.GetIDSSite(s)
+			if err != nil {
+				return logs, fmt.Errorf("unifi.GetIDS(): %w", err)
+			}
+
+			for _, e := range events {
+				logs = append(logs, e)
+
+				webserver.NewInputEvent(PluginName, s.ID+"_ids", &webserver.Event{
+					Ts: e.Datetime, Msg: e.Msg, Tags: map[string]string{
+						"type": "ids", "key": e.Key, "site_id": e.SiteID,
+						"site_name": e.SiteName, "source": e.SourceName,
+					},
+				})
+			}
+		}
+	}
+
+	return logs, nil
+}
+
+// redactEvent attempts to mask personally identifying information from log
+// messages. This currently misses the "msg" value entirely and leaks PII
+// information. The event is modified in place and returned.
+func redactEvent(e *unifi.Event, hash *bool) *unifi.Event {
+	// Match RedactNamePII/RedactMacPII: a nil pointer means "do not hash"
+	// instead of panicking on dereference.
+	if hash == nil || !*hash {
+		return e
+	}
+
+	// metrics.Events[i].Msg <-- not sure what to do here.
+	e.DestIPGeo = unifi.IPGeo{}
+	e.SourceIPGeo = unifi.IPGeo{}
+	e.Host = RedactNamePII(e.Host, hash)
+	e.Hostname = RedactNamePII(e.Hostname, hash)
+	e.DstMAC = RedactMacPII(e.DstMAC, hash)
+	e.SrcMAC = RedactMacPII(e.SrcMAC, hash)
+
+	return e
+}
diff --git a/pkg/inputunifi/collector.go b/pkg/inputunifi/collector.go
new file mode 100644
index 00000000..6b2472ff
--- /dev/null
+++ b/pkg/inputunifi/collector.go
@@ -0,0 +1,269 @@
+package inputunifi
+
+// nolint: gosec
+import (
+ "crypto/md5"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+)
+
+var ErrScrapeFilterMatchFailed = fmt.Errorf("scrape filter match failed, and filter is not http URL")
+
+// isNill reports whether this controller has no live client session
+// (c.Unifi unset), meaning (re-)authentication is required before polling.
+func (u *InputUnifi) isNill(c *Controller) bool {
+	u.RLock()
+	defer u.RUnlock()
+
+	return c.Unifi == nil
+}
+
+// newDynamicCntrlr creates and saves a controller definition for further use.
+// This is called when an unconfigured controller is requested. Returns true
+// when a new controller was created, false when one already existed for url.
+func (u *InputUnifi) newDynamicCntrlr(url string) (bool, *Controller) {
+	u.Lock()
+	defer u.Unlock()
+
+	if c := u.dynamic[url]; c != nil {
+		// it already exists.
+		return false, c
+	}
+
+	// Shallow-copy the default config and point the copy at the new URL.
+	ccopy := u.Default // copy defaults into new controller
+	u.dynamic[url] = &ccopy
+	u.dynamic[url].URL = url
+
+	return true, u.dynamic[url]
+}
+
+// dynamicController polls a controller identified by a scrape-filter path.
+// The path must be an http(s) URL; a controller config is created on first
+// use (authenticating immediately) and then reused for later scrapes.
+func (u *InputUnifi) dynamicController(filter *poller.Filter) (*poller.Metrics, error) {
+	if !strings.HasPrefix(filter.Path, "http") {
+		return nil, ErrScrapeFilterMatchFailed
+	}
+
+	newCntrlr, c := u.newDynamicCntrlr(filter.Path)
+
+	// Only authenticate the first time this URL is seen.
+	if newCntrlr {
+		u.Logf("Authenticating to Dynamic UniFi Controller: %s", filter.Path)
+
+		if err := u.getUnifi(c); err != nil {
+			u.logController(c)
+			return nil, fmt.Errorf("authenticating to %s: %w", filter.Path, err)
+		}
+
+		u.logController(c)
+	}
+
+	return u.collectController(c)
+}
+
+// collectController polls one controller for metrics, re-authenticating
+// before the poll if there is no session, and re-authenticating again after
+// a failed poll so the next cycle starts with a fresh login.
+func (u *InputUnifi) collectController(c *Controller) (*poller.Metrics, error) {
+	if u.isNill(c) {
+		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
+
+		if err := u.getUnifi(c); err != nil {
+			return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
+		}
+	}
+
+	metrics, err := u.pollController(c)
+	if err != nil {
+		// Poll failed; assume the session expired and log in again.
+		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
+
+		if err := u.getUnifi(c); err != nil {
+			return metrics, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
+		}
+	}
+
+	// Returns the original poll error (if any) alongside partial metrics.
+	return metrics, err
+}
+
+// pollController fetches sites, optional rogue-AP and DPI data, clients, and
+// devices from one controller, then augments the data for output plugins.
+// Results are also pushed to the webserver plugin via updateWeb on return.
+//nolint:cyclop
+func (u *InputUnifi) pollController(c *Controller) (*poller.Metrics, error) {
+	u.RLock()
+	defer u.RUnlock()
+
+	// Get the sites we care about.
+	sites, err := u.getFilteredSites(c)
+	if err != nil {
+		return nil, fmt.Errorf("unifi.GetSites(): %w", err)
+	}
+
+	m := &Metrics{TS: time.Now(), Sites: sites}
+	defer updateWeb(c, m)
+
+	// Optional data sets, gated by (nil-safe) config flags.
+	if c.SaveRogue != nil && *c.SaveRogue {
+		if m.RogueAPs, err = c.Unifi.GetRogueAPs(sites); err != nil {
+			return nil, fmt.Errorf("unifi.GetRogueAPs(%s): %w", c.URL, err)
+		}
+	}
+
+	if c.SaveDPI != nil && *c.SaveDPI {
+		if m.SitesDPI, err = c.Unifi.GetSiteDPI(sites); err != nil {
+			return nil, fmt.Errorf("unifi.GetSiteDPI(%s): %w", c.URL, err)
+		}
+
+		if m.ClientsDPI, err = c.Unifi.GetClientsDPI(sites); err != nil {
+			return nil, fmt.Errorf("unifi.GetClientsDPI(%s): %w", c.URL, err)
+		}
+	}
+
+	// Get all the points.
+	if m.Clients, err = c.Unifi.GetClients(sites); err != nil {
+		return nil, fmt.Errorf("unifi.GetClients(%s): %w", c.URL, err)
+	}
+
+	if m.Devices, err = c.Unifi.GetDevices(sites); err != nil {
+		return nil, fmt.Errorf("unifi.GetDevices(%s): %w", c.URL, err)
+	}
+
+	return u.augmentMetrics(c, m), nil
+}
+
+// augmentMetrics is our middleware layer between collecting metrics and writing them.
+// This is where we can manipulate the returned data or make arbitrary decisions.
+// This method currently adds parent device names to client metrics and hashes PII.
+// This method also converts our local *Metrics type into a slice of interfaces for poller.
+func (u *InputUnifi) augmentMetrics(c *Controller, metrics *Metrics) *poller.Metrics {
+	if metrics == nil {
+		return nil
+	}
+
+	m, devices, bssdIDs := extractDevices(metrics)
+
+	// These come blank, so set them here.
+	for _, client := range metrics.Clients {
+		if devices[client.Mac] = client.Name; client.Name == "" {
+			devices[client.Mac] = client.Hostname
+		}
+
+		client.Mac = RedactMacPII(client.Mac, c.HashPII)
+		client.Name = RedactNamePII(client.Name, c.HashPII)
+		client.Hostname = RedactNamePII(client.Hostname, c.HashPII)
+		client.SwName = devices[client.SwMac]
+		client.ApName = devices[client.ApMac]
+		client.GwName = devices[client.GwMac]
+		client.RadioDescription = bssdIDs[client.Bssid] + client.RadioProto
+		m.Clients = append(m.Clients, client)
+	}
+
+	for _, client := range metrics.ClientsDPI {
+		// Name on Client DPI data also comes blank, find it based on MAC address.
+		client.Name = devices[client.MAC]
+		if client.Name == "" {
+			client.Name = client.MAC
+		}
+
+		client.Name = RedactNamePII(client.Name, c.HashPII)
+		client.MAC = RedactMacPII(client.MAC, c.HashPII)
+		m.ClientsDPI = append(m.ClientsDPI, client)
+	}
+
+	for _, ap := range metrics.RogueAPs {
+		// XXX: do we need augment this data?
+		m.RogueAPs = append(m.RogueAPs, ap)
+	}
+
+	// Guard the pointer (matches SaveRogue/SaveDPI handling in
+	// pollController); an unset config value must not panic.
+	if c.SaveSites != nil && *c.SaveSites {
+		for _, site := range metrics.Sites {
+			m.Sites = append(m.Sites, site)
+		}
+
+		for _, site := range metrics.SitesDPI {
+			m.SitesDPI = append(m.SitesDPI, site)
+		}
+	}
+
+	return m
+}
+
+// extractDevices is a helper for augmentMetrics. It copies every device type
+// into the poller.Metrics slice and returns two lookup maps: device MAC ->
+// device name, and VAP BSSID -> "name radio radioName:" description prefix.
+func extractDevices(metrics *Metrics) (*poller.Metrics, map[string]string, map[string]string) {
+	m := &poller.Metrics{TS: metrics.TS}
+	devices := make(map[string]string)
+	bssdIDs := make(map[string]string)
+
+	for _, r := range metrics.Devices.UAPs {
+		devices[r.Mac] = r.Name
+		m.Devices = append(m.Devices, r)
+
+		// Only access points contribute BSSID descriptions.
+		for _, v := range r.VapTable {
+			bssdIDs[v.Bssid] = fmt.Sprintf("%s %s %s:", r.Name, v.Radio, v.RadioName)
+		}
+	}
+
+	for _, r := range metrics.Devices.USGs {
+		devices[r.Mac] = r.Name
+		m.Devices = append(m.Devices, r)
+	}
+
+	for _, r := range metrics.Devices.USWs {
+		devices[r.Mac] = r.Name
+		m.Devices = append(m.Devices, r)
+	}
+
+	for _, r := range metrics.Devices.UDMs {
+		devices[r.Mac] = r.Name
+		m.Devices = append(m.Devices, r)
+	}
+
+	for _, r := range metrics.Devices.UXGs {
+		devices[r.Mac] = r.Name
+		m.Devices = append(m.Devices, r)
+	}
+
+	return m, devices, bssdIDs
+}
+
+// RedactNamePII converts a name string to an md5 hash (first 24 chars only).
+// Useful for masking out personally identifying information. A nil or false
+// hash flag (or an empty input) returns the input unchanged.
+func RedactNamePII(pii string, hash *bool) string {
+	if hash == nil || !*hash || pii == "" {
+		return pii
+	}
+
+	s := fmt.Sprintf("%x", md5.Sum([]byte(pii))) // nolint: gosec
+	// instead of 32 characters, only use 24.
+	return s[:24]
+}
+
+// RedactMacPII converts a MAC address to an md5 hashed version (first 14 chars only).
+// Useful for masking out personally identifying information. A nil or false
+// hash flag (or an empty input) returns the input unchanged.
+func RedactMacPII(pii string, hash *bool) (output string) {
+	if hash == nil || !*hash || pii == "" {
+		return pii
+	}
+
+	s := fmt.Sprintf("%x", md5.Sum([]byte(pii))) // nolint: gosec
+	// This formats a "fake" mac address looking string.
+	return fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s", s[:2], s[2:4], s[4:6], s[6:8], s[8:10], s[10:12], s[12:14])
+}
+
+// getFilteredSites returns a list of sites to fetch data for.
+// Omits requested but unconfigured sites. Grabs the full list from the
+// controller and returns the sites provided in the config file. An empty
+// site list, or the literal "all", returns every site unfiltered.
+func (u *InputUnifi) getFilteredSites(c *Controller) ([]*unifi.Site, error) {
+	u.RLock()
+	defer u.RUnlock()
+
+	sites, err := c.Unifi.GetSites()
+	if err != nil {
+		return nil, fmt.Errorf("controller: %w", err)
+	} else if len(c.Sites) == 0 || StringInSlice("all", c.Sites) {
+		return sites, nil
+	}
+
+	// Filter in place, reusing the sites slice's backing array.
+	i := 0
+
+	for _, s := range sites {
+		// Only include valid sites in the request filter.
+		if StringInSlice(s.Name, c.Sites) {
+			sites[i] = s
+			i++
+		}
+	}
+
+	return sites[:i], nil
+}
diff --git a/pkg/inputunifi/input.go b/pkg/inputunifi/input.go
new file mode 100644
index 00000000..6e17ba41
--- /dev/null
+++ b/pkg/inputunifi/input.go
@@ -0,0 +1,339 @@
+// Package inputunifi implements the poller.Input interface and bridges the gap between
+// metrics from the unifi library, and the augments required to pump them into unifi-poller.
+package inputunifi
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+)
+
+// PluginName is the name of this input plugin.
+const PluginName = "unifi"
+
+const (
+ defaultURL = "https://127.0.0.1:8443"
+ defaultUser = "unifipoller"
+ defaultPass = "unifipoller"
+ defaultSite = "all"
+)
+
+// InputUnifi contains the running data.
+type InputUnifi struct {
+ *Config `json:"unifi" toml:"unifi" xml:"unifi" yaml:"unifi"`
+ dynamic map[string]*Controller
+ sync.Mutex // to lock the map above.
+ Logger poller.Logger
+}
+
+// Controller represents the configuration for a UniFi Controller.
+// Each polled controller may have its own configuration.
+type Controller struct {
+ VerifySSL *bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
+ SaveAnomal *bool `json:"save_anomalies" toml:"save_anomalies" xml:"save_anomalies" yaml:"save_anomalies"`
+ SaveAlarms *bool `json:"save_alarms" toml:"save_alarms" xml:"save_alarms" yaml:"save_alarms"`
+ SaveEvents *bool `json:"save_events" toml:"save_events" xml:"save_events" yaml:"save_events"`
+ SaveIDS *bool `json:"save_ids" toml:"save_ids" xml:"save_ids" yaml:"save_ids"`
+ SaveDPI *bool `json:"save_dpi" toml:"save_dpi" xml:"save_dpi" yaml:"save_dpi"`
+ SaveRogue *bool `json:"save_rogue" toml:"save_rogue" xml:"save_rogue" yaml:"save_rogue"`
+ HashPII *bool `json:"hash_pii" toml:"hash_pii" xml:"hash_pii" yaml:"hash_pii"`
+ SaveSites *bool `json:"save_sites" toml:"save_sites" xml:"save_sites" yaml:"save_sites"`
+ CertPaths []string `json:"ssl_cert_paths" toml:"ssl_cert_paths" xml:"ssl_cert_path" yaml:"ssl_cert_paths"`
+ User string `json:"user" toml:"user" xml:"user" yaml:"user"`
+ Pass string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
+ URL string `json:"url" toml:"url" xml:"url" yaml:"url"`
+ Sites []string `json:"sites" toml:"sites" xml:"site" yaml:"sites"`
+ Unifi *unifi.Unifi `json:"-" toml:"-" xml:"-" yaml:"-"`
+ ID string `json:"id,omitempty"` // this is an output, not an input.
+}
+
+// Config contains our configuration data.
+type Config struct {
+ sync.RWMutex // locks the Unifi struct member when re-authing to unifi.
+ Default Controller `json:"defaults" toml:"defaults" xml:"default" yaml:"defaults"`
+ Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
+ Dynamic bool `json:"dynamic" toml:"dynamic" xml:"dynamic,attr" yaml:"dynamic"`
+ Controllers []*Controller `json:"controllers" toml:"controller" xml:"controller" yaml:"controllers"`
+}
+
+// Metrics is simply a useful container for everything.
+type Metrics struct {
+ TS time.Time
+ Sites []*unifi.Site
+ Clients []*unifi.Client
+ SitesDPI []*unifi.DPITable
+ ClientsDPI []*unifi.DPITable
+ RogueAPs []*unifi.RogueAP
+ Devices *unifi.Devices
+}
+
+// init registers this package with the poller core as the "unifi" input.
+func init() { // nolint: gochecknoinits
+	u := &InputUnifi{
+		dynamic: make(map[string]*Controller),
+	}
+
+	poller.NewInput(&poller.InputPlugin{
+		Name:   PluginName,
+		Input:  u, // this library implements poller.Input interface for Metrics().
+		Config: u, // Defines our config data interface.
+	})
+}
+
+// getCerts reads the configured SSL certificate files from disk and returns
+// their contents as a slice of byte slices, or nil when none are configured.
+func (c *Controller) getCerts() ([][]byte, error) {
+	if len(c.CertPaths) == 0 {
+		return nil, nil
+	}
+
+	certs := make([][]byte, 0, len(c.CertPaths))
+
+	for _, path := range c.CertPaths {
+		data, err := os.ReadFile(path)
+		if err != nil {
+			return nil, fmt.Errorf("reading SSL cert file: %w", err)
+		}
+
+		certs = append(certs, data)
+	}
+
+	return certs, nil
+}
+
+// getUnifi (re-)authenticates to a unifi controller.
+// If certificate files are provided, they are re-read.
+// NOTE(review): dereferences c.VerifySSL, so controller defaults must be
+// applied (setDefaults/setControllerDefaults) before calling this.
+func (u *InputUnifi) getUnifi(c *Controller) error {
+	u.Lock()
+	defer u.Unlock()
+
+	// Drop idle connections from any previous session before replacing it.
+	if c.Unifi != nil {
+		c.Unifi.CloseIdleConnections()
+	}
+
+	certs, err := c.getCerts()
+	if err != nil {
+		return err
+	}
+
+	// Create an authenticated session to the Unifi Controller.
+	c.Unifi, err = unifi.NewUnifi(&unifi.Config{
+		User:      c.User,
+		Pass:      c.Pass,
+		URL:       c.URL,
+		SSLCert:   certs,
+		VerifySSL: *c.VerifySSL,
+		ErrorLog:  u.LogErrorf, // Log all errors.
+		DebugLog:  u.LogDebugf, // Log debug messages.
+	})
+	if err != nil {
+		c.Unifi = nil // do not keep a half-initialized session around.
+		return fmt.Errorf("unifi controller: %w", err)
+	}
+
+	u.LogDebugf("Authenticated with controller successfully, %s", c.URL)
+
+	return nil
+}
+
+// checkSites makes sure the list of provided sites exists on the controller.
+// This only runs once during initialization. Unknown sites are logged and
+// dropped; if nothing valid remains, the list falls back to "all".
+func (u *InputUnifi) checkSites(c *Controller) error {
+	u.RLock()
+	defer u.RUnlock()
+
+	// Treat a missing or blank first entry as "all sites".
+	if len(c.Sites) == 0 || c.Sites[0] == "" {
+		c.Sites = []string{"all"}
+	}
+
+	u.LogDebugf("Checking Controller Sites List")
+
+	sites, err := c.Unifi.GetSites()
+	if err != nil {
+		return fmt.Errorf("controller: %w", err)
+	}
+
+	msg := []string{}
+	for _, site := range sites {
+		msg = append(msg, site.Name+" ("+site.Desc+")")
+	}
+
+	u.Logf("Found %d site(s) on controller %s: %v", len(msg), c.URL, strings.Join(msg, ", "))
+
+	if StringInSlice("all", c.Sites) {
+		c.Sites = []string{"all"}
+		return nil
+	}
+
+	keep := []string{}
+
+	// Labeled continue: advance to the next configured site as soon as it
+	// matches any controller site; otherwise log it as not found.
+FIRST:
+	for _, s := range c.Sites {
+		for _, site := range sites {
+			if s == site.Name {
+				keep = append(keep, s)
+				continue FIRST
+			}
+		}
+		u.LogErrorf("Configured site not found on controller %s: %v", c.URL, s)
+	}
+
+	if c.Sites = keep; len(keep) == 0 {
+		c.Sites = []string{"all"}
+	}
+
+	return nil
+}
+
+// getPassFromFile reads a password out of the named file. Read errors are
+// logged but not fatal; the (possibly empty) content is returned trimmed.
+func (u *InputUnifi) getPassFromFile(filename string) string {
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		u.LogErrorf("Reading UniFi Password File: %v", err)
+	}
+
+	return strings.TrimSpace(string(data))
+}
+
+// setDefaults fills in hard-coded defaults for any unset option on the
+// default controller configuration.
+func (u *InputUnifi) setDefaults(c *Controller) { //nolint:cyclop
+	t := true
+	f := false
+
+	// Point every unset boolean option at one of two shared bool variables.
+	// Sharing t/f mirrors the original aliasing behavior exactly.
+	for _, opt := range []struct {
+		ptr **bool
+		def *bool
+	}{
+		{&c.SaveSites, &t},
+		{&c.VerifySSL, &f},
+		{&c.HashPII, &f},
+		{&c.SaveDPI, &f},
+		{&c.SaveRogue, &f},
+		{&c.SaveIDS, &f},
+		{&c.SaveEvents, &f},
+		{&c.SaveAlarms, &f},
+		{&c.SaveAnomal, &f},
+	} {
+		if *opt.ptr == nil {
+			*opt.ptr = opt.def
+		}
+	}
+
+	if c.URL == "" {
+		c.URL = defaultURL
+	}
+
+	// A "file://" password is replaced with the file's contents.
+	if strings.HasPrefix(c.Pass, "file://") {
+		c.Pass = u.getPassFromFile(strings.TrimPrefix(c.Pass, "file://"))
+	}
+
+	if c.Pass == "" {
+		c.Pass = defaultPass
+	}
+
+	if c.User == "" {
+		c.User = defaultUser
+	}
+
+	if len(c.Sites) == 0 {
+		c.Sites = []string{defaultSite}
+	}
+}
+
+// setControllerDefaults sets defaults for a configured controller.
+// Any missing values come from the Default controller settings (above).
+func (u *InputUnifi) setControllerDefaults(c *Controller) *Controller { //nolint:cyclop,funlen
+	// Configured controller defaults.
+	if c.SaveSites == nil {
+		c.SaveSites = u.Default.SaveSites
+	}
+
+	if c.VerifySSL == nil {
+		c.VerifySSL = u.Default.VerifySSL
+	}
+
+	if c.CertPaths == nil {
+		c.CertPaths = u.Default.CertPaths
+	}
+
+	if c.HashPII == nil {
+		c.HashPII = u.Default.HashPII
+	}
+
+	if c.SaveDPI == nil {
+		c.SaveDPI = u.Default.SaveDPI
+	}
+
+	if c.SaveIDS == nil {
+		c.SaveIDS = u.Default.SaveIDS
+	}
+
+	if c.SaveRogue == nil {
+		c.SaveRogue = u.Default.SaveRogue
+	}
+
+	if c.SaveEvents == nil {
+		c.SaveEvents = u.Default.SaveEvents
+	}
+
+	if c.SaveAlarms == nil {
+		c.SaveAlarms = u.Default.SaveAlarms
+	}
+
+	if c.SaveAnomal == nil {
+		c.SaveAnomal = u.Default.SaveAnomal
+	}
+
+	if c.URL == "" {
+		c.URL = u.Default.URL
+	}
+
+	// A "file://" password is replaced with the file's contents.
+	if strings.HasPrefix(c.Pass, "file://") {
+		c.Pass = u.getPassFromFile(strings.TrimPrefix(c.Pass, "file://"))
+	}
+
+	if c.Pass == "" {
+		c.Pass = u.Default.Pass
+	}
+
+	if c.User == "" {
+		c.User = u.Default.User
+	}
+
+	if len(c.Sites) == 0 {
+		c.Sites = u.Default.Sites
+	}
+
+	return c
+}
+
+// StringInSlice returns true if a string is in a slice.
+// The comparison is case-insensitive (Unicode case folding).
+func StringInSlice(str string, slice []string) bool {
+	for i := range slice {
+		if strings.EqualFold(slice[i], str) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/pkg/inputunifi/interface.go b/pkg/inputunifi/interface.go
new file mode 100644
index 00000000..7d2dd25f
--- /dev/null
+++ b/pkg/inputunifi/interface.go
@@ -0,0 +1,202 @@
+package inputunifi
+
+/* This file contains the three poller.Input interface methods. */
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+ "github.com/unpoller/unpoller/pkg/webserver"
+)
+
+var (
+ ErrDynamicLookupsDisabled = fmt.Errorf("filter path requested but dynamic lookups disabled")
+ ErrControllerNumNotFound = fmt.Errorf("controller number not found")
+ ErrNoFilterKindProvided = fmt.Errorf("must provide filter: devices, clients, other")
+)
+
+// Initialize gets called one time when starting up.
+// Satisfies poller.Input interface. It applies defaults, authenticates to
+// every configured controller, and publishes the config to the web server.
+func (u *InputUnifi) Initialize(l poller.Logger) error {
+	// A nil config means the plugin was not configured; disable it.
+	if u.Config == nil {
+		u.Config = &Config{Disable: true}
+	}
+
+	if u.Logger = l; u.Disable {
+		u.Logf("UniFi input plugin disabled or missing configuration!")
+		return nil
+	}
+
+	// With no controllers and dynamic lookups off, poll the default controller.
+	if u.setDefaults(&u.Default); len(u.Controllers) == 0 && !u.Dynamic {
+		u.Controllers = []*Controller{&u.Default}
+	}
+
+	if len(u.Controllers) == 0 {
+		u.Logf("No controllers configured. Polling dynamic controllers only! Defaults:")
+		u.logController(&u.Default)
+	}
+
+	for i, c := range u.Controllers {
+		// Auth failures are logged, not fatal: polling retries later.
+		if err := u.getUnifi(u.setControllerDefaults(c)); err != nil {
+			u.LogErrorf("Controller %d of %d Auth or Connection Error, retrying: %v", i+1, len(u.Controllers), err)
+			continue
+		}
+
+		if err := u.checkSites(c); err != nil {
+			u.LogErrorf("checking sites on %s: %v", c.URL, err)
+		}
+
+		u.Logf("Configured UniFi Controller %d of %d:", i+1, len(u.Controllers))
+		u.logController(c)
+	}
+
+	webserver.UpdateInput(&webserver.Input{Name: PluginName, Config: formatConfig(u.Config)})
+
+	return nil
+}
+
+// logController prints one controller's settings to the log.
+// NOTE(review): dereferences the Save*/HashPII/VerifySSL pointers, so the
+// controller must have had its defaults applied before this is called.
+func (u *InputUnifi) logController(c *Controller) {
+	u.Logf("   => URL: %s (verify SSL: %v)", c.URL, *c.VerifySSL)
+
+	if len(c.CertPaths) > 0 {
+		u.Logf("   => Cert Files: %s", strings.Join(c.CertPaths, ", "))
+	}
+
+	if c.Unifi != nil {
+		u.Logf("   => Version: %s (%s)", c.Unifi.ServerVersion, c.Unifi.UUID)
+	}
+
+	u.Logf("   => Username: %s (has password: %v)", c.User, c.Pass != "")
+	u.Logf("   => Hash PII / Poll Sites: %v / %s", *c.HashPII, strings.Join(c.Sites, ", "))
+	u.Logf("   => Save Sites / Save DPI: %v / %v (metrics)", *c.SaveSites, *c.SaveDPI)
+	u.Logf("   => Save Events / Save IDS: %v / %v (logs)", *c.SaveEvents, *c.SaveIDS)
+	u.Logf("   => Save Alarms / Anomalies: %v / %v (logs)", *c.SaveAlarms, *c.SaveAnomal)
+	u.Logf("   => Save Rogue APs: %v", *c.SaveRogue)
+}
+
+// Events allows you to pull only events (and IDS) from the UniFi Controller.
+// This does not fully respect HashPII, but it may in the future!
+// Use Filter.Path to pick a specific controller, otherwise poll them all!
+func (u *InputUnifi) Events(filter *poller.Filter) (*poller.Events, error) {
+	if u.Disable {
+		return nil, nil
+	}
+
+	logs := []interface{}{}
+
+	if filter == nil {
+		filter = &poller.Filter{}
+	}
+
+	for _, c := range u.Controllers {
+		// Skip controllers that do not match the (optional) path filter.
+		if filter.Path != "" && !strings.EqualFold(c.URL, filter.Path) {
+			continue
+		}
+
+		events, err := u.collectControllerEvents(c)
+		if err != nil {
+			return nil, err
+		}
+
+		logs = append(logs, events...)
+	}
+
+	return &poller.Events{Logs: logs}, nil
+}
+
+// Metrics grabs all the measurements from a UniFi controller and returns them.
+// Set Filter.Path to a controller URL for a specific controller (or get them all).
+// If a filtered path matches no configured controller, a dynamic lookup is
+// attempted (when enabled).
+func (u *InputUnifi) Metrics(filter *poller.Filter) (*poller.Metrics, error) {
+	if u.Disable {
+		return nil, nil
+	}
+
+	metrics := &poller.Metrics{}
+
+	if filter == nil {
+		filter = &poller.Filter{}
+	}
+
+	// Check if the request is for an existing, configured controller (or all controllers)
+	for _, c := range u.Controllers {
+		if filter.Path != "" && !strings.EqualFold(c.URL, filter.Path) {
+			// continue only if we have a filter path and it doesn't match.
+			continue
+		}
+
+		m, err := u.collectController(c)
+		if err != nil {
+			return metrics, err
+		}
+
+		metrics = poller.AppendMetrics(metrics, m)
+	}
+
+	// No filter, or the filter matched a configured controller: we are done.
+	if filter.Path == "" || len(metrics.Clients) != 0 {
+		return metrics, nil
+	}
+
+	if !u.Dynamic {
+		return nil, ErrDynamicLookupsDisabled
+	}
+
+	// Attempt a dynamic metrics fetch from an unconfigured controller.
+	return u.dynamicController(filter)
+}
+
+// RawMetrics returns API output from the first configured UniFi controller.
+// Adjust filter.Unit to pull from a controller other than the first.
+// filter.Kind selects devices, clients, or an arbitrary path ("other").
+func (u *InputUnifi) RawMetrics(filter *poller.Filter) ([]byte, error) {
+	if l := len(u.Controllers); filter.Unit >= l {
+		return nil, fmt.Errorf("%d controller(s) configured, '%d': %w", l, filter.Unit, ErrControllerNumNotFound)
+	}
+
+	c := u.Controllers[filter.Unit]
+	// Re-auth when the controller session is missing/unusable.
+	if u.isNill(c) {
+		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
+
+		if err := u.getUnifi(c); err != nil {
+			return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
+		}
+	}
+
+	if err := u.checkSites(c); err != nil {
+		return nil, err
+	}
+
+	sites, err := u.getFilteredSites(c)
+	if err != nil {
+		return nil, err
+	}
+
+	switch filter.Kind {
+	case "d", "device", "devices":
+		return u.getSitesJSON(c, unifi.APIDevicePath, sites)
+	case "client", "clients", "c":
+		return u.getSitesJSON(c, unifi.APIClientPath, sites)
+	case "other", "o":
+		return c.Unifi.GetJSON(filter.Path)
+	default:
+		return []byte{}, ErrNoFilterKindProvided
+	}
+}
+
+// getSitesJSON fetches the raw JSON payload at path for every provided site
+// and concatenates the bodies. The path must contain one %s verb that is
+// filled with the site name.
+func (u *InputUnifi) getSitesJSON(c *Controller, path string, sites []*unifi.Site) ([]byte, error) {
+	combined := []byte{}
+
+	for _, site := range sites {
+		apiPath := fmt.Sprintf(path, site.Name)
+		u.LogDebugf("Returning Path '%s' for site: %s (%s):\n", apiPath, site.Desc, site.Name)
+
+		body, err := c.Unifi.GetJSON(apiPath)
+		if err != nil {
+			return combined, fmt.Errorf("controller: %w", err)
+		}
+
+		combined = append(combined, body...)
+	}
+
+	return combined, nil
+}
diff --git a/pkg/inputunifi/updateweb.go b/pkg/inputunifi/updateweb.go
new file mode 100644
index 00000000..ac26b508
--- /dev/null
+++ b/pkg/inputunifi/updateweb.go
@@ -0,0 +1,214 @@
+package inputunifi
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/webserver"
+)
+
+/* This code reformats our data to be displayed on the built-in web interface. */
+
+// updateWeb pushes freshly collected metrics to the built-in web server.
+func updateWeb(c *Controller, metrics *Metrics) {
+	webserver.UpdateInput(&webserver.Input{
+		Name:    PluginName, // Forgetting this leads to 3 hours of head scratching.
+		Sites:   formatSites(c, metrics.Sites),
+		Clients: formatClients(c, metrics.Clients),
+		Devices: formatDevices(c, metrics.Devices),
+	})
+}
+
+// formatConfig returns a display copy of the config with secrets scrubbed
+// (passwords become "true"/"false") for the web interface.
+func formatConfig(config *Config) *Config {
+	return &Config{
+		Default:     *formatControllers([]*Controller{&config.Default})[0],
+		Disable:     config.Disable,
+		Dynamic:     config.Dynamic,
+		Controllers: formatControllers(config.Controllers),
+	}
+}
+
+func formatControllers(controllers []*Controller) []*Controller {
+ fixed := []*Controller{}
+
+ for _, c := range controllers {
+ id := ""
+ if c.Unifi != nil {
+ id = c.Unifi.UUID
+ }
+
+ fixed = append(fixed, &Controller{
+ VerifySSL: c.VerifySSL,
+ SaveAnomal: c.SaveAnomal,
+ SaveAlarms: c.SaveAlarms,
+ SaveRogue: c.SaveRogue,
+ SaveEvents: c.SaveEvents,
+ SaveIDS: c.SaveIDS,
+ SaveDPI: c.SaveDPI,
+ HashPII: c.HashPII,
+ SaveSites: c.SaveSites,
+ User: c.User,
+ Pass: strconv.FormatBool(c.Pass != ""),
+ URL: c.URL,
+ Sites: c.Sites,
+ ID: id,
+ })
+ }
+
+ return fixed
+}
+
+// formatSites converts unifi sites into the webserver's site format.
+// NOTE(review): dereferences c.Unifi, so the controller must be authenticated.
+func formatSites(c *Controller, sites []*unifi.Site) (s webserver.Sites) {
+	for _, site := range sites {
+		s = append(s, &webserver.Site{
+			ID:         site.ID,
+			Name:       site.Name,
+			Desc:       site.Desc,
+			Source:     site.SourceName,
+			Controller: c.Unifi.UUID,
+		})
+	}
+
+	return s
+}
+
+// formatClients converts unifi clients into the webserver's client format.
+// The attached device MAC is the AP for wireless clients, the switch for
+// wired clients, and falls back to the gateway MAC when neither is known.
+func formatClients(c *Controller, clients []*unifi.Client) (d webserver.Clients) {
+	for _, client := range clients {
+		clientType, deviceMAC := "unknown", ""
+		if client.ApMac != "" {
+			clientType = "wireless"
+			deviceMAC = client.ApMac
+		} else if client.SwMac != "" {
+			clientType = "wired"
+			deviceMAC = client.SwMac
+		}
+
+		// BUGFIX: deviceMAC used to start as "unknown", so this gateway
+		// fallback was unreachable dead code. Now it actually applies.
+		if deviceMAC == "" {
+			deviceMAC = client.GwMac
+		}
+
+		// Preserve the "unknown" sentinel when no device MAC is available.
+		if deviceMAC == "" {
+			deviceMAC = "unknown"
+		}
+
+		d = append(d, &webserver.Client{
+			Name:       client.Name,
+			SiteID:     client.SiteID,
+			Source:     client.SourceName,
+			Controller: c.Unifi.UUID,
+			MAC:        client.Mac,
+			IP:         client.IP,
+			Type:       clientType,
+			DeviceMAC:  deviceMAC,
+			Rx:         client.RxBytes,
+			Tx:         client.TxBytes,
+			Since:      time.Unix(client.FirstSeen, 0),
+			Last:       time.Unix(client.LastSeen, 0),
+		})
+	}
+
+	return d
+}
+
+// formatDevices converts unifi devices into the webserver's device format.
+// All four device families (UAP, UDM, USW, USG) share the same fields, so
+// the struct assembly is factored into newWebDevice.
+func formatDevices(c *Controller, devices *unifi.Devices) (d webserver.Devices) {
+	if devices == nil {
+		return d
+	}
+
+	for _, device := range devices.UAPs {
+		d = append(d, newWebDevice(c, device.Name, device.SiteID, device.SourceName, device.Mac,
+			device.IP, device.Type, device.Model, device.Version, int(device.Uptime.Val), int(device.NumSta.Val)))
+	}
+
+	for _, device := range devices.UDMs {
+		d = append(d, newWebDevice(c, device.Name, device.SiteID, device.SourceName, device.Mac,
+			device.IP, device.Type, device.Model, device.Version, int(device.Uptime.Val), int(device.NumSta.Val)))
+	}
+
+	for _, device := range devices.USWs {
+		d = append(d, newWebDevice(c, device.Name, device.SiteID, device.SourceName, device.Mac,
+			device.IP, device.Type, device.Model, device.Version, int(device.Uptime.Val), int(device.NumSta.Val)))
+	}
+
+	for _, device := range devices.USGs {
+		d = append(d, newWebDevice(c, device.Name, device.SiteID, device.SourceName, device.Mac,
+			device.IP, device.Type, device.Model, device.Version, int(device.Uptime.Val), int(device.NumSta.Val)))
+	}
+
+	return d
+}
+
+// newWebDevice assembles one webserver device entry from common device fields.
+func newWebDevice(c *Controller, name, siteID, source, mac, ip, devType, model, version string,
+	uptime, clients int) *webserver.Device {
+	return &webserver.Device{
+		Name:       name,
+		SiteID:     siteID,
+		Source:     source,
+		Controller: c.Unifi.UUID,
+		MAC:        mac,
+		IP:         ip,
+		Type:       devType,
+		Model:      model,
+		Version:    version,
+		Uptime:     uptime,
+		Clients:    clients,
+		Config:     nil,
+	}
+}
+
+// Logf logs a message. It mirrors the message to the web UI event log
+// before delegating to the poller logger.
+func (u *InputUnifi) Logf(msg string, v ...interface{}) {
+	webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
+		Ts:   time.Now(),
+		Msg:  fmt.Sprintf(msg, v...),
+		Tags: map[string]string{"type": "info"},
+	})
+	u.Logger.Logf(msg, v...)
+}
+
+// LogErrorf logs an error message. It mirrors the message to the web UI
+// event log before delegating to the poller logger.
+func (u *InputUnifi) LogErrorf(msg string, v ...interface{}) {
+	webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
+		Ts:   time.Now(),
+		Msg:  fmt.Sprintf(msg, v...),
+		Tags: map[string]string{"type": "error"},
+	})
+	u.Logger.LogErrorf(msg, v...)
+}
+
+// LogDebugf logs a debug message. It mirrors the message to the web UI
+// event log before delegating to the poller logger.
+func (u *InputUnifi) LogDebugf(msg string, v ...interface{}) {
+	webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
+		Ts:   time.Now(),
+		Msg:  fmt.Sprintf(msg, v...),
+		Tags: map[string]string{"type": "debug"},
+	})
+	u.Logger.LogDebugf(msg, v...)
+}
diff --git a/pkg/lokiunifi/LICENSE b/pkg/lokiunifi/LICENSE
new file mode 100644
index 00000000..e6ac092e
--- /dev/null
+++ b/pkg/lokiunifi/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020-2021 David Newhall II
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/pkg/lokiunifi/README.md b/pkg/lokiunifi/README.md
new file mode 100644
index 00000000..ebc5f4a6
--- /dev/null
+++ b/pkg/lokiunifi/README.md
@@ -0,0 +1,26 @@
+# lokiunifi
+
+Loki Output Plugin for UnPoller
+
+This plugin writes UniFi Events and IDS data to Loki. Maybe Alarms too.
+
+Example Config:
+
+```toml
+[loki]
+ # URL is the only required setting for Loki.
+ url = "http://192.168.3.2:3100"
+
+ # How often to poll UniFi and report to Loki.
+ interval = "2m"
+
+ # How long to wait for Loki responses.
+ timeout = "5s"
+
+ # Set these to use basic auth.
+ #user = ""
+ #pass = ""
+
+ # Used for auth-less multi-tenant.
+ #tenant_id = ""
+```
diff --git a/pkg/lokiunifi/client.go b/pkg/lokiunifi/client.go
new file mode 100644
index 00000000..96610996
--- /dev/null
+++ b/pkg/lokiunifi/client.go
@@ -0,0 +1,101 @@
+package lokiunifi
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+const (
+ lokiPushPath = "/loki/api/v1/push"
+)
+
+var errStatusCode = fmt.Errorf("unexpected HTTP status code")
+
+// Client holds the http client for contacting Loki.
+type Client struct {
+ *Config
+ *http.Client
+}
+
+// httpClient builds the Loki HTTP client using the configured timeout and
+// SSL-verification setting.
+func (l *Loki) httpClient() *Client {
+	return &Client{
+		Config: l.Config,
+		Client: &http.Client{
+			Timeout: l.Timeout.Duration,
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{
+					InsecureSkipVerify: !l.VerifySSL, // nolint: gosec
+				},
+			},
+		},
+	}
+}
+
+// Post marshals and posts a batch of log messages.
+// Anything other than a 204 No Content response is treated as an error.
+func (c *Client) Post(logs interface{}) error {
+	msg, err := json.Marshal(logs)
+	if err != nil {
+		return fmt.Errorf("json marshal: %w", err)
+	}
+
+	// Trim-then-append makes the push path idempotent on the configured URL.
+	u := strings.TrimSuffix(c.URL, lokiPushPath) + lokiPushPath
+
+	req, err := c.NewRequest(u, "POST", "application/json", msg)
+	if err != nil {
+		return err
+	}
+
+	if code, body, err := c.Do(req); err != nil {
+		return err
+	} else if code != http.StatusNoContent {
+		m := fmt.Sprintf("%s (%d/%s) %s, msg: %s", u, code, http.StatusText(code),
+			strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " ")), msg)
+
+		return fmt.Errorf("%s: %w", m, errStatusCode)
+	}
+
+	return nil
+}
+
+// NewRequest creates the http request based on input data.
+// Basic-auth credentials and the multi-tenant org header are attached
+// whenever they are configured.
+func (c *Client) NewRequest(url, method, cType string, msg []byte) (*http.Request, error) {
+	request, err := http.NewRequest(method, url, bytes.NewBuffer(msg)) //nolint:noctx
+	if err != nil {
+		return nil, fmt.Errorf("creating request: %w", err)
+	}
+
+	if c.TenantID != "" {
+		request.Header.Set("X-Scope-OrgID", c.TenantID)
+	}
+
+	if c.Username != "" || c.Password != "" {
+		request.SetBasicAuth(c.Username, c.Password)
+	}
+
+	if cType != "" {
+		request.Header.Set("Content-Type", cType)
+	}
+
+	return request, nil
+}
+
+// Do makes an http request and returns the status code, body and/or an error.
+// The body is returned (possibly partial) even when reading it fails.
+func (c *Client) Do(req *http.Request) (int, []byte, error) {
+	resp, err := c.Client.Do(req)
+	if err != nil {
+		return 0, nil, fmt.Errorf("making request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return resp.StatusCode, body, fmt.Errorf("reading body: %w", err)
+	}
+
+	return resp.StatusCode, body, nil
+}
diff --git a/pkg/lokiunifi/logger.go b/pkg/lokiunifi/logger.go
new file mode 100644
index 00000000..831c6258
--- /dev/null
+++ b/pkg/lokiunifi/logger.go
@@ -0,0 +1,38 @@
+package lokiunifi
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/unpoller/unpoller/pkg/webserver"
+)
+
+// Logf logs a message.
+func (l *Loki) Logf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "info"},
+ })
+ l.Collect.Logf(msg, v...)
+}
+
+// LogErrorf logs an error message.
+func (l *Loki) LogErrorf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "error"},
+ })
+ l.Collect.LogErrorf(msg, v...)
+}
+
+// LogDebugf logs a debug message.
+func (l *Loki) LogDebugf(msg string, v ...interface{}) {
+ webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
+ Ts: time.Now(),
+ Msg: fmt.Sprintf(msg, v...),
+ Tags: map[string]string{"type": "debug"},
+ })
+ l.Collect.LogDebugf(msg, v...)
+}
diff --git a/pkg/lokiunifi/loki.go b/pkg/lokiunifi/loki.go
new file mode 100644
index 00000000..0f54c39e
--- /dev/null
+++ b/pkg/lokiunifi/loki.go
@@ -0,0 +1,143 @@
+package lokiunifi
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/unpoller/unpoller/pkg/poller"
+ "github.com/unpoller/unpoller/pkg/webserver"
+ "golift.io/cnfg"
+)
+
+const (
+ maxInterval = 10 * time.Minute
+ minInterval = 10 * time.Second
+ defaultTimeout = 10 * time.Second
+ defaultInterval = 2 * time.Minute
+)
+
+const (
+ // InputName is the name of plugin that gives us data.
+ InputName = "unifi"
+ // PluginName is the name of this plugin.
+ PluginName = "loki"
+)
+
+// Config is the plugin's input data.
+type Config struct {
+ Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
+ VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
+ URL string `json:"url" toml:"url" xml:"url" yaml:"url"`
+ Username string `json:"user" toml:"user" xml:"user" yaml:"user"`
+ Password string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
+ TenantID string `json:"tenant_id" toml:"tenant_id" xml:"tenant_id" yaml:"tenant_id"`
+ Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
+ Timeout cnfg.Duration `json:"timeout" toml:"timeout" xml:"timeout" yaml:"timeout"`
+}
+
+// Loki is the main library struct. This satisfies the poller.Output interface.
+type Loki struct {
+ Collect poller.Collect
+ *Config `json:"loki" toml:"loki" xml:"loki" yaml:"loki"`
+ client *Client
+ last time.Time
+}
+
+// init is how this modular code is initialized by the main app.
+// This module adds itself as an output module to the poller core.
+func init() { // nolint: gochecknoinits
+	l := &Loki{Config: &Config{
+		Interval: cnfg.Duration{Duration: defaultInterval},
+		Timeout:  cnfg.Duration{Duration: defaultTimeout},
+	}}
+
+	poller.NewOutput(&poller.Output{
+		Name:   PluginName,
+		Config: l,
+		Method: l.Run,
+	})
+}
+
+// Run is fired from the poller library after the Config is unmarshalled.
+// It validates the config, publishes a scrubbed copy to the web server,
+// then blocks in PollController for the life of the process.
+func (l *Loki) Run(collect poller.Collect) error {
+	if l.Collect = collect; l.Config == nil || l.URL == "" || l.Disable {
+		l.Logf("Loki config missing (or disabled), Loki output disabled!")
+		return nil
+	}
+
+	l.ValidateConfig()
+
+	// Copy the config and replace the password with a has-password flag.
+	fake := *l.Config
+	fake.Password = strconv.FormatBool(fake.Password != "")
+
+	webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: fake})
+	l.PollController()
+	l.LogErrorf("Loki Output Plugin Stopped!")
+
+	return nil
+}
+
+// ValidateConfig sets initial "last" update time. Also creates an http client,
+// makes sure URL is sane, and clamps the interval within min/max limits.
+func (l *Loki) ValidateConfig() {
+	if l.Interval.Duration > maxInterval {
+		l.Interval.Duration = maxInterval
+	} else if l.Interval.Duration < minInterval {
+		l.Interval.Duration = minInterval
+	}
+
+	// A "file://" password is replaced with the file's contents.
+	if strings.HasPrefix(l.Password, "file://") {
+		pass, err := os.ReadFile(strings.TrimPrefix(l.Password, "file://"))
+		if err != nil {
+			l.LogErrorf("Reading Loki Password File: %v", err)
+		}
+
+		l.Password = strings.TrimSpace(string(pass))
+	}
+
+	// Backdate "last" so the first poll covers one full interval.
+	l.last = time.Now().Add(-l.Interval.Duration)
+	l.client = l.httpClient()
+	l.URL = strings.TrimRight(l.URL, "/") // gets a path appended to it later.
+}
+
+// PollController runs forever, polling UniFi for events and pushing them to Loki.
+// This is started by Run(). The ticker is never stopped because this loop
+// lives for the duration of the process.
+func (l *Loki) PollController() {
+	interval := l.Interval.Round(time.Second)
+	l.Logf("Loki Event collection started, interval: %v, URL: %s", interval, l.URL)
+
+	ticker := time.NewTicker(interval)
+	for start := range ticker.C {
+		events, err := l.Collect.Events(&poller.Filter{Name: InputName})
+		if err != nil {
+			// Collection failed; skip this tick and try again next interval.
+			l.LogErrorf("event fetch for Loki failed: %v", err)
+			continue
+		}
+
+		err = l.ProcessEvents(l.NewReport(start), events)
+		if err != nil {
+			l.LogErrorf("%v", err)
+		}
+	}
+}
+
+// ProcessEvents offloads some of the loop from PollController.
+// It converts the collected events to Loki streams, posts them, and advances
+// the "last seen" watermark on success.
+func (l *Loki) ProcessEvents(report *Report, events *poller.Events) error {
+	// Sometimes it gets stuck on old messages. This gets it past that.
+	if time.Since(l.last) > 4*l.Interval.Duration {
+		l.last = time.Now().Add(-4 * l.Interval.Duration)
+	}
+
+	logs := report.ProcessEventLogs(events)
+	if err := l.client.Post(logs); err != nil {
+		return fmt.Errorf("sending to Loki failed: %w", err)
+	}
+
+	l.last = report.Start
+	l.Logf("Events sent to Loki. %v", report)
+
+	return nil
+}
diff --git a/pkg/lokiunifi/report.go b/pkg/lokiunifi/report.go
new file mode 100644
index 00000000..edc876c5
--- /dev/null
+++ b/pkg/lokiunifi/report.go
@@ -0,0 +1,82 @@
+package lokiunifi
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/unpoller/unifi"
+ "github.com/unpoller/unpoller/pkg/poller"
+)
+
+// LogStream contains a stream of logs (like a log file).
+// This app uses one stream per log entry because each log may have different labels.
+type LogStream struct {
+ Labels map[string]string `json:"stream"` // "the file name"
+ Entries [][]string `json:"values"` // "the log lines"
+}
+
+// Logs is the main logs-holding structure. This is the Loki-output format.
+type Logs struct {
+ Streams []LogStream `json:"streams"` // "multiple files"
+}
+
+// Report is the temporary data generated by processing events.
+type Report struct {
+ Start time.Time
+ Oldest time.Time
+ poller.Logger
+ Counts map[string]int
+}
+
+// NewReport makes a new report, carrying forward the previous "last" time
+// as the oldest event timestamp worth keeping.
+func (l *Loki) NewReport(start time.Time) *Report {
+	return &Report{
+		Start:  start,
+		Oldest: l.last,
+		Logger: l,
+		Counts: make(map[string]int),
+	}
+}
+
+// ProcessEventLogs loops the event Logs, matches the interface type, calls the
+// appropriate method for the data, and compiles the Logs into a Loki format.
+// This runs once per interval, if there was no collection error.
+func (r *Report) ProcessEventLogs(events *poller.Events) *Logs {
+	logs := &Logs{}
+
+	for _, e := range events.Logs {
+		switch event := e.(type) {
+		case *unifi.IDS:
+			r.IDS(event, logs)
+		case *unifi.Event:
+			r.Event(event, logs)
+		case *unifi.Alarm:
+			r.Alarm(event, logs)
+		case *unifi.Anomaly:
+			r.Anomaly(event, logs)
+		default: // unlikely.
+			r.LogErrorf("unknown event type: %T", e)
+		}
+	}
+
+	return logs
+}
+
+// String summarizes the report's event counts and elapsed time for logging.
+func (r *Report) String() string {
+	return fmt.Sprintf("%s: %d, %s: %d, %s: %d, %s: %d, Dur: %v",
+		typeEvent, r.Counts[typeEvent], typeIDS, r.Counts[typeIDS],
+		typeAlarm, r.Counts[typeAlarm], typeAnomaly, r.Counts[typeAnomaly],
+		time.Since(r.Start).Round(time.Millisecond))
+}
+
+// CleanLabels removes every label whose value is empty or only whitespace,
+// mutating and returning the same map.
+func CleanLabels(labels map[string]string) map[string]string {
+	for key, val := range labels {
+		if strings.TrimSpace(val) == "" {
+			delete(labels, key)
+		}
+	}
+
+	return labels
+}
diff --git a/pkg/lokiunifi/report_alarm.go b/pkg/lokiunifi/report_alarm.go
new file mode 100644
index 00000000..5c039f84
--- /dev/null
+++ b/pkg/lokiunifi/report_alarm.go
@@ -0,0 +1,37 @@
+package lokiunifi
+
+import (
+ "strconv"
+
+ "github.com/unpoller/unifi"
+)
+
+const typeAlarm = "Alarm"
+
+// Alarm stores a structured Alarm for batch sending to Loki.
+func (r *Report) Alarm(event *unifi.Alarm, logs *Logs) {
+ if event.Datetime.Before(r.Oldest) {
+ return
+ }
+
+ r.Counts[typeAlarm]++ // increase counter and append new log line.
+
+ logs.Streams = append(logs.Streams, LogStream{
+ Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
+ Labels: CleanLabels(map[string]string{
+ "application": "unifi_alarm",
+ "source": event.SourceName,
+ "site_name": event.SiteName,
+ "subsystem": event.Subsystem,
+ "category": event.Catname,
+ "event_type": event.EventType,
+ "key": event.Key,
+ "app_protocol": event.AppProto,
+ "protocol": event.Proto,
+ "interface": event.InIface,
+ "src_country": event.SrcIPCountry,
+ "usgip": event.USGIP,
+ "action": event.InnerAlertAction,
+ }),
+ })
+}
diff --git a/pkg/lokiunifi/report_anomaly.go b/pkg/lokiunifi/report_anomaly.go
new file mode 100644
index 00000000..bce0fc4a
--- /dev/null
+++ b/pkg/lokiunifi/report_anomaly.go
@@ -0,0 +1,28 @@
+package lokiunifi
+
+import (
+ "strconv"
+
+ "github.com/unpoller/unifi"
+)
+
+const typeAnomaly = "Anomaly"
+
+// Anomaly stores a structured Anomaly for batch sending to Loki.
+func (r *Report) Anomaly(event *unifi.Anomaly, logs *Logs) {
+ if event.Datetime.Before(r.Oldest) {
+ return
+ }
+
+ r.Counts[typeAnomaly]++ // increase counter and append new log line.
+
+ logs.Streams = append(logs.Streams, LogStream{
+ Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Anomaly}},
+ Labels: CleanLabels(map[string]string{
+ "application": "unifi_anomaly",
+ "source": event.SourceName,
+ "site_name": event.SiteName,
+ "device_mac": event.DeviceMAC,
+ }),
+ })
+}
diff --git a/pkg/lokiunifi/report_event.go b/pkg/lokiunifi/report_event.go
new file mode 100644
index 00000000..33037985
--- /dev/null
+++ b/pkg/lokiunifi/report_event.go
@@ -0,0 +1,54 @@
+package lokiunifi
+
+import (
+ "strconv"
+
+ "github.com/unpoller/unifi"
+)
+
+const typeEvent = "Event"
+
+// Event stores a structured UniFi Event for batch sending to Loki.
+func (r *Report) Event(event *unifi.Event, logs *Logs) {
+ if event.Datetime.Before(r.Oldest) {
+ return
+ }
+
+ r.Counts[typeEvent]++ // increase counter and append new log line.
+
+ logs.Streams = append(logs.Streams, LogStream{
+ Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
+ Labels: CleanLabels(map[string]string{
+ "application": "unifi_event",
+ "admin": event.Admin, // username
+ "site_name": event.SiteName,
+ "source": event.SourceName,
+ "subsystem": event.Subsystem,
+ "ap_from": event.ApFrom,
+ "ap_to": event.ApTo,
+ "ap": event.Ap,
+ "ap_name": event.ApName,
+ "gw": event.Gw,
+ "gw_name": event.GwName,
+ "sw": event.Sw,
+ "sw_name": event.SwName,
+ "category": event.Catname,
+ "radio": event.Radio,
+ "radio_from": event.RadioFrom,
+ "radio_to": event.RadioTo,
+ "key": event.Key,
+ "interface": event.InIface,
+ "event_type": event.EventType,
+ "ssid": event.SSID,
+ "channel": event.Channel.Txt,
+ "channel_from": event.ChannelFrom.Txt,
+ "channel_to": event.ChannelTo.Txt,
+ "usgip": event.USGIP,
+ "network": event.Network,
+ "app_protocol": event.AppProto,
+ "protocol": event.Proto,
+ "action": event.InnerAlertAction,
+ "src_country": event.SrcIPCountry,
+ }),
+ })
+}
diff --git a/pkg/lokiunifi/report_ids.go b/pkg/lokiunifi/report_ids.go
new file mode 100644
index 00000000..c43e1614
--- /dev/null
+++ b/pkg/lokiunifi/report_ids.go
@@ -0,0 +1,37 @@
+package lokiunifi
+
+import (
+ "strconv"
+
+ "github.com/unpoller/unifi"
+)
+
+const typeIDS = "IDS"
+
+// IDS stores a structured IDS event for batch sending to Loki.
+func (r *Report) IDS(event *unifi.IDS, logs *Logs) {
+ if event.Datetime.Before(r.Oldest) {
+ return
+ }
+
+ r.Counts[typeIDS]++ // increase counter and append new log line.
+
+ logs.Streams = append(logs.Streams, LogStream{
+ Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
+ Labels: CleanLabels(map[string]string{
+ "application": "unifi_ids",
+ "source": event.SourceName,
+ "site_name": event.SiteName,
+ "subsystem": event.Subsystem,
+ "category": event.Catname,
+ "event_type": event.EventType,
+ "key": event.Key,
+ "app_protocol": event.AppProto,
+ "protocol": event.Proto,
+ "interface": event.InIface,
+ "src_country": event.SrcIPCountry,
+ "usgip": event.USGIP,
+ "action": event.InnerAlertAction,
+ }),
+ })
+}
diff --git a/pkg/mysqlunifi/.gitignore b/pkg/mysqlunifi/.gitignore
new file mode 100644
index 00000000..140f8cf8
--- /dev/null
+++ b/pkg/mysqlunifi/.gitignore
@@ -0,0 +1 @@
+*.so
diff --git a/pkg/mysqlunifi/LICENSE b/pkg/mysqlunifi/LICENSE
new file mode 100644
index 00000000..6d5fa682
--- /dev/null
+++ b/pkg/mysqlunifi/LICENSE
@@ -0,0 +1,21 @@
+MIT LICENSE.
+Copyright (c) 2018-2020 David Newhall II
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/mysqlunifi/README.md b/pkg/mysqlunifi/README.md
new file mode 100644
index 00000000..4a3379c0
--- /dev/null
+++ b/pkg/mysqlunifi/README.md
@@ -0,0 +1,30 @@
+# MySQL Output Plugin Example
+
+This plugin is not finished and did not get finished for the release of poller v2.
+Sorry about that. I'll try to get it working soon! 2/4/20
+
+The code here, and the dynamic plugin provided shows an example of how you can
+write your own output for unifi-poller. This plugin records some very basic
+data about clients on a UniFi network into a MySQL database.
+
+You could write outputs that do... anything. For example, they could compare the
+current list of connected clients to a previous one (in a database, or stored in memory), and send a
+notification if it changes. The possibilities are endless.
+
+You must compile your plugin using the unifi-poller source for the version you're
+using. In other words, to build a plugin for version 2.0.1, do this:
+
+```bash
+mkdir -p $GOPATH/src/github.com/unifi-poller
+cd $GOPATH/src/github.com/unifi-poller
+
+git clone git@github.com:unifi-poller/unifi-poller.git
+cd unifi-poller
+
+git checkout v2.0.1
+
+cp -r