Merge pull request #445 from unpoller/merge-them-all
Merge integration and core repos into unpoller
This commit is contained in:
commit
11a65c76b3
|
|
@ -81,6 +81,7 @@ documentation support. This project succeeds because of them. Thank you!
|
|||
</p>
|
||||
|
||||
## Copyright & License
|
||||
|
||||
<img style="float: right;" align="right" width="200px" src="https://unpoller.com/img/unpoller.png">
|
||||
|
||||
- Copyright © 2018-2020 David Newhall II.
|
||||
|
|
|
|||
35
go.mod
35
go.mod
|
|
@ -1,12 +1,33 @@
|
|||
module github.com/unpoller/unpoller
|
||||
|
||||
go 1.16
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224
|
||||
github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c
|
||||
github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b
|
||||
github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e
|
||||
github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/common v0.29.0
|
||||
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c
|
||||
golang.org/x/crypto v0.3.0
|
||||
golang.org/x/term v0.2.0
|
||||
golift.io/cnfg v0.1.1
|
||||
golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5
|
||||
golift.io/version v0.0.2
|
||||
)
|
||||
|
||||
require golang.org/x/net v0.2.0 // indirect
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.0.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/golang/protobuf v1.4.3 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/unpoller/unifi v0.1.0
|
||||
golang.org/x/sys v0.2.0 // indirect
|
||||
google.golang.org/protobuf v1.26.0-rc.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
)
|
||||
|
|
|
|||
271
go.sum
271
go.sum
|
|
@ -31,92 +31,48 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
|||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
|
||||
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible h1:Dq8Dr+4sV1gBO1sHDWdW+4G+PdsA+YSJOK925MxrrCY=
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
|
@ -140,11 +96,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
|
@ -168,60 +121,23 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
|
@ -231,177 +147,70 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
|
|||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c h1:zqmyTlQyufRC65JnImJ6H1Sf7BDj8bG31EV919NVEQc=
|
||||
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224 h1:avnWIPsXSuOIT1x2oImsbOUpLvC0ACQxsPmhYm5P8/E=
|
||||
github.com/unpoller/datadogunifi v0.0.0-20221124011555-8037ce373224/go.mod h1:/E0LxkzsPngrP+hevAaXOjVCOr8JWRosGbrvlV6reIk=
|
||||
github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c h1:T+T+jWgL3+4Bgy3VuTLNJLoShvmrfPuH7DxaYeB0gho=
|
||||
github.com/unpoller/influxunifi v0.0.0-20210623102357-4b2dc7fa818c/go.mod h1:GHqTS6Ry8fcVDPoPuIhI6e7HPVH6tSOZIJsQ5h2zmJo=
|
||||
github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b h1:dHFTRAxwm064wPA4SOijcMfOqayrywn5foKqz7iU2BQ=
|
||||
github.com/unpoller/inputunifi v0.0.0-20210623102218-06574d44cc6b/go.mod h1:Q8i0Sgji6D7zgRB11YoDmrmF17EEp+DtAH151IQaMKg=
|
||||
github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a h1:uYVTxnhNssppXV1R92GbQ5RxTYbjfB94xmwobcw/FbQ=
|
||||
github.com/unpoller/lokiunifi v0.0.0-20210623102057-0902524b6a8a/go.mod h1:TVrEHyRoUWJczzYFuP6tCS3mR1gAA7+LV/YILVGEoLs=
|
||||
github.com/unpoller/poller v0.0.0-20210623101401-f12841d79a28 h1:YAv5naMdpOFahnxteFFRidZlrSEwLv8V2nBKJKmLmHg=
|
||||
github.com/unpoller/poller v0.0.0-20210623101401-f12841d79a28/go.mod h1:AbDp60t5WlLSRELAliMJ0RFQpm/0yXpyolVSZqNtero=
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e h1:tNBIBCmtc7whuhkjKyEzpU3OHzYHyGCBy/LERhHxh3A=
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e/go.mod h1:AbDp60t5WlLSRELAliMJ0RFQpm/0yXpyolVSZqNtero=
|
||||
github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99 h1:6x0qUKxU/A5UOUSoUGLbDuaGrXlRkOvdiWDGLnNC8BA=
|
||||
github.com/unpoller/promunifi v0.0.0-20210623101918-b986e661ac99/go.mod h1:xZQ+DIFUlI6XJqLHLEXxujWQwSzbESNtHtC0+njvOGA=
|
||||
github.com/unpoller/unifi v0.0.0-20210914213836-fd3c38c905a3/go.mod h1:K9QFFGfZws4gzB+Popix19S/rBKqrtqI+tyPORyg3F0=
|
||||
github.com/unpoller/unifi v0.0.0-20221124010147-8d83427af67b/go.mod h1:pJGPtjikPcYO+rZMpgYOj6Zs044Dl4R+u3MsV3TMenk=
|
||||
github.com/unpoller/unifi v0.0.9-0.20210623100314-3dccfdbc4c80 h1:XjHGfJhMwnB63DYHgtWGJgDxLhxVcAOtf+cfuvpGoyo=
|
||||
github.com/unpoller/unifi v0.0.9-0.20210623100314-3dccfdbc4c80/go.mod h1:K9QFFGfZws4gzB+Popix19S/rBKqrtqI+tyPORyg3F0=
|
||||
github.com/unpoller/webserver v0.0.0-20210623101543-90d89bb0acdf h1:HhXi3qca3kyFEFPh0mqdr0bpQs94hJvMbUJztwPtf2A=
|
||||
github.com/unpoller/webserver v0.0.0-20210623101543-90d89bb0acdf/go.mod h1:77PywuUvspdtoRuH1htFhR3Tp0pLyWj6kJlYR4tBYho=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/unpoller/unifi v0.1.0 h1:aN3DrL3C+lkAN9/yOvrv+2fiSm/IXswdDVi8v42P4vY=
|
||||
github.com/unpoller/unifi v0.1.0/go.mod h1:iZA8XU8CkuKHKcmK8me2zWBceAxnlpd6pEQGOvIPRY8=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
|
@ -434,13 +243,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
|
@ -451,7 +255,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
|
@ -466,11 +269,9 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -486,15 +287,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -504,10 +299,8 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -527,34 +320,28 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
|
|
@ -564,8 +351,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
|||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
|
@ -573,7 +358,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
|
|
@ -597,11 +381,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golift.io/cnfg v0.0.7 h1:qkNpP5Bq+5Gtoc6HcI8kapMD5zFOVan6qguxqBQF3OY=
|
||||
golift.io/cnfg v0.0.7/go.mod h1:AsB0DJe7nv0bizKaoy3e3MjjOF7upTpMOMvsfv4CNNk=
|
||||
golift.io/cnfg v0.1.1 h1:8T20+WhEgfThCu5D/Tf5CCFGORSCrTB8cgR29sXTpUE=
|
||||
golift.io/cnfg v0.1.1/go.mod h1:cjgsYXSEgyWJEbSk+QehZuGN26jw+1CzwceGCbJ0Lck=
|
||||
golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5 h1:W9EKFeDCvnMCXrXZ/z33fmDZZmUQYJPkUqlQ95Sbg+o=
|
||||
golift.io/cnfgfile v0.0.0-20220509075834-08755d9ef3f5/go.mod h1:oXt/WBhSizIK6GmS2ka85IQ4TLmL2BFh4jHKR0bbThI=
|
||||
golift.io/version v0.0.2 h1:i0gXRuSDHKs4O0sVDUg4+vNIuOxYoXhaxspftu2FRTE=
|
||||
golift.io/version v0.0.2/go.mod h1:76aHNz8/Pm7CbuxIsDi97jABL5Zui3f2uZxDm4vB6hU=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
|
|
@ -619,7 +404,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
|||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
|
|
@ -630,7 +414,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
|
|
@ -655,15 +438,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
|
|
@ -682,33 +460,22 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
@ -719,5 +486,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
|||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
|
|
|||
12
main.go
12
main.go
|
|
@ -6,14 +6,14 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/poller"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
// Load input plugins!
|
||||
_ "github.com/unpoller/inputunifi"
|
||||
_ "github.com/unpoller/unpoller/pkg/inputunifi"
|
||||
// Load output plugins!
|
||||
_ "github.com/unpoller/datadogunifi"
|
||||
_ "github.com/unpoller/influxunifi"
|
||||
_ "github.com/unpoller/lokiunifi"
|
||||
_ "github.com/unpoller/promunifi"
|
||||
_ "github.com/unpoller/unpoller/pkg/datadogunifi"
|
||||
_ "github.com/unpoller/unpoller/pkg/influxunifi"
|
||||
_ "github.com/unpoller/unpoller/pkg/lokiunifi"
|
||||
_ "github.com/unpoller/unpoller/pkg/promunifi"
|
||||
)
|
||||
|
||||
// Keep it simple.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2020 Cody Lee
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
# datadogunifi
|
||||
|
||||
UniFi Poller Output Plugin for DataDog
|
||||
|
||||
## Configuration
|
||||
|
||||
```yaml
|
||||
datadog:
|
||||
# How often to poll UniFi and report to Datadog.
|
||||
interval: "2m"
|
||||
|
||||
# To disable this output plugin
|
||||
disable: false
|
||||
|
||||
# Datadog Custom Options
|
||||
|
||||
# address to talk to the datadog agent, by default this uses the local statsd UDP interface
|
||||
# address: "..."
|
||||
|
||||
# namespace to prepend to all data
|
||||
# namespace: ""
|
||||
|
||||
# tags to append to all data
|
||||
# tags:
|
||||
# - foo
|
||||
|
||||
# max_bytes_per_payload is the maximum number of bytes a single payload will contain.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 1432 for UDP and 8192 for UDS.
|
||||
# max_bytes_per_payload: 0
|
||||
|
||||
# max_messages_per_payload is the maximum number of metrics, events and/or service checks a single payload will contain.
|
||||
# This option can be set to `1` to create an unbuffered client.
|
||||
# max_messages_per_payload: 0
|
||||
|
||||
# BufferPoolSize is the size of the pool of buffers in number of buffers.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 2048 for UDP and 512 for UDS.
|
||||
# buffer_pool_size: 0
|
||||
|
||||
# buffer_flush_interval is the interval after which the current buffer will get flushed.
|
||||
# buffer_flush_interval: 0
|
||||
|
||||
# buffer_shard_count is the number of buffer "shards" that will be used.
|
||||
# Those shards allows the use of multiple buffers at the same time to reduce
|
||||
# lock contention.
|
||||
# buffer_shard_count: 0
|
||||
|
||||
# sender_queue_size is the size of the sender queue in number of buffers.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 2048 for UDP and 512 for UDS.
|
||||
# sender_queue_size: 0
|
||||
|
||||
# write_timeout_uds is the timeout after which a UDS packet is dropped.
|
||||
# write_timeout_uds: 5000
|
||||
|
||||
# receive_mode determines the behavior of the client when receiving too many
|
||||
# metrics. The client will either drop the metrics if its buffers are
|
||||
# full (ChannelMode mode) or block the caller until the metric can be
|
||||
# handled (MutexMode mode). By default the client will use MutexMode. This
|
||||
# option should be set to ChannelMode only when use under very high
|
||||
# load.
|
||||
#
|
||||
# MutexMode uses a mutex internally which is much faster than
|
||||
# channel but causes some lock contention when used with a high number
|
||||
# of threads. Mutex are sharded based on the metrics name which
|
||||
# limit mutex contention when goroutines send different metrics.
|
||||
#
|
||||
# ChannelMode: uses channel (of ChannelModeBufferSize size) to send
|
||||
# metrics and drop metrics if the channel is full. Sending metrics in
|
||||
# this mode is slower than MutexMode (because of the channel), but
|
||||
# will not block the application. This mode is made for application
|
||||
# using many goroutines, sending the same metrics at a very high
|
||||
# volume. The goal is to not slow down the application at the cost of
|
||||
# dropping metrics and having a lower max throughput.
|
||||
# receive_mode: 0
|
||||
|
||||
# channel_mode_buffer_size is the size of the channel holding incoming metrics
|
||||
# channel_mode_buffer_size: 0
|
||||
|
||||
# aggregation_flush_interval is the interval for the aggregator to flush metrics
|
||||
# aggregation_flush_interval: 0
|
||||
```
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// These constants are used as counter names when tallying how many
// alarms and anomalies were processed during a report cycle.
const (
	alarmT   = item("Alarm")
	anomalyT = item("Anomaly")
)
|
||||
|
||||
// batchAlarms generates alarm events and logs for Datadog.
|
||||
func (u *DatadogUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dupl
|
||||
if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
|
||||
return // The event is older than our interval, ignore it.
|
||||
}
|
||||
|
||||
tagMap := map[string]string{
|
||||
"dst_port": strconv.Itoa(event.DestPort),
|
||||
"src_port": strconv.Itoa(event.SrcPort),
|
||||
"dest_ip": event.DestIP,
|
||||
"dst_mac": event.DstMAC,
|
||||
"host": event.Host,
|
||||
"msg": event.Msg,
|
||||
"src_ip": event.SrcIP,
|
||||
"src_mac": event.SrcMAC,
|
||||
"dst_ip_asn": fmt.Sprintf("%d", event.DestIPGeo.Asn),
|
||||
"dst_ip_latitude": fmt.Sprintf("%0.6f", event.DestIPGeo.Latitude),
|
||||
"dst_ip_longitude": fmt.Sprintf("%0.6f", event.DestIPGeo.Longitude),
|
||||
"dst_ip_city": event.DestIPGeo.City,
|
||||
"dst_ip_continent_code": event.DestIPGeo.ContinentCode,
|
||||
"dst_ip_country_code": event.DestIPGeo.CountryCode,
|
||||
"dst_ip_country_name": event.DestIPGeo.CountryName,
|
||||
"dst_ip_organization": event.DestIPGeo.Organization,
|
||||
"src_ip_asn": fmt.Sprintf("%d", event.SourceIPGeo.Asn),
|
||||
"src_ip_latitude": fmt.Sprintf("%0.6f", event.SourceIPGeo.Latitude),
|
||||
"src_ip_longitude": fmt.Sprintf("%0.6f", event.SourceIPGeo.Longitude),
|
||||
"src_ip_city": event.SourceIPGeo.City,
|
||||
"src_ip_continent_code": event.SourceIPGeo.ContinentCode,
|
||||
"src_ip_country_code": event.SourceIPGeo.CountryCode,
|
||||
"src_ip_country_name": event.SourceIPGeo.CountryName,
|
||||
"src_ip_organization": event.SourceIPGeo.Organization,
|
||||
"site_name": event.SiteName,
|
||||
"source": event.SourceName,
|
||||
"in_iface": event.InIface,
|
||||
"event_type": event.EventType,
|
||||
"subsystem": event.Subsystem,
|
||||
"archived": event.Archived.Txt,
|
||||
"usg_ip": event.USGIP,
|
||||
"proto": event.Proto,
|
||||
"key": event.Key,
|
||||
"catname": event.Catname,
|
||||
"app_proto": event.AppProto,
|
||||
"action": event.InnerAlertAction,
|
||||
}
|
||||
r.addCount(alarmT)
|
||||
|
||||
tagMap = cleanTags(tagMap)
|
||||
tags := tagMapToTags(tagMap)
|
||||
title := fmt.Sprintf("[%s][%s] Alarm at %s from %s", event.EventType, event.Catname, event.SiteName, event.SourceName)
|
||||
_ = r.reportEvent(title, event.Datetime, event.Msg, tags)
|
||||
r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Msg, tagMapToSimpleStrings(tagMap)))
|
||||
}
|
||||
|
||||
// batchAnomaly generates Anomalies from UniFi for Datadog.
|
||||
func (u *DatadogUnifi) batchAnomaly(r report, event *unifi.Anomaly) {
|
||||
if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
|
||||
return // The event is older than our interval, ignore it.
|
||||
}
|
||||
|
||||
r.addCount(anomalyT)
|
||||
|
||||
tagMap := cleanTags(map[string]string{
|
||||
"application": "unifi_anomaly",
|
||||
"source": event.SourceName,
|
||||
"site_name": event.SiteName,
|
||||
"device_mac": event.DeviceMAC,
|
||||
})
|
||||
tags := tagMapToTags(tagMap)
|
||||
|
||||
title := fmt.Sprintf("Anomaly detected at %s from %s", event.SiteName, event.SourceName)
|
||||
_ = r.reportEvent(title, event.Datetime, event.Anomaly, tags)
|
||||
r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Anomaly, tagMapToSimpleStrings(tagMap)))
|
||||
}
|
||||
|
|
@ -0,0 +1,189 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// batchClient generates Unifi Client datapoints for Datadog.
|
||||
// These points can be passed directly to Datadog.
|
||||
func (u *DatadogUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
|
||||
tags := map[string]string{
|
||||
"mac": s.Mac,
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
"ap_name": s.ApName,
|
||||
"gw_name": s.GwName,
|
||||
"sw_name": s.SwName,
|
||||
"oui": s.Oui,
|
||||
"radio_name": s.RadioName,
|
||||
"radio": s.Radio,
|
||||
"radio_proto": s.RadioProto,
|
||||
"name": s.Name,
|
||||
"fixed_ip": s.FixedIP,
|
||||
"sw_port": s.SwPort.Txt,
|
||||
"os_class": s.OsClass.Txt,
|
||||
"os_name": s.OsName.Txt,
|
||||
"dev_cat": s.DevCat.Txt,
|
||||
"dev_id": s.DevID.Txt,
|
||||
"dev_vendor": s.DevVendor.Txt,
|
||||
"dev_family": s.DevFamily.Txt,
|
||||
"is_wired": s.IsWired.Txt,
|
||||
"is_guest": s.IsGuest.Txt,
|
||||
"use_fixed_ip": s.UseFixedIP.Txt,
|
||||
"channel": s.Channel.Txt,
|
||||
"vlan": s.Vlan.Txt,
|
||||
"hostname": s.Name,
|
||||
"essid": s.Essid,
|
||||
"bssid": s.Bssid,
|
||||
"ip": s.IP,
|
||||
}
|
||||
powerSaveEnabled := 0.0
|
||||
if s.PowersaveEnabled.Val {
|
||||
powerSaveEnabled = 1.0
|
||||
}
|
||||
data := map[string]float64{
|
||||
"anomalies": float64(s.Anomalies),
|
||||
"channel": s.Channel.Val,
|
||||
"satisfaction": s.Satisfaction.Val,
|
||||
"bytes_r": float64(s.BytesR),
|
||||
"ccq": float64(s.Ccq),
|
||||
"noise": float64(s.Noise),
|
||||
"powersave_enabled": powerSaveEnabled,
|
||||
"roam_count": float64(s.RoamCount),
|
||||
"rssi": float64(s.Rssi),
|
||||
"rx_bytes": float64(s.RxBytes),
|
||||
"rx_bytes_r": float64(s.RxBytesR),
|
||||
"rx_packets": float64(s.RxPackets),
|
||||
"rx_rate": float64(s.RxRate),
|
||||
"signal": float64(s.Signal),
|
||||
"tx_bytes": float64(s.TxBytes),
|
||||
"tx_bytes_r": float64(s.TxBytesR),
|
||||
"tx_packets": float64(s.TxPackets),
|
||||
"tx_retries": float64(s.TxRetries),
|
||||
"tx_power": float64(s.TxPower),
|
||||
"tx_rate": float64(s.TxRate),
|
||||
"uptime": float64(s.Uptime),
|
||||
"wifi_tx_attempts": float64(s.WifiTxAttempts),
|
||||
"wired_rx_bytes": float64(s.WiredRxBytes),
|
||||
"wired_rx_bytes-r": float64(s.WiredRxBytesR),
|
||||
"wired_rx_packets": float64(s.WiredRxPackets),
|
||||
"wired_tx_bytes": float64(s.WiredTxBytes),
|
||||
"wired_tx_bytes-r": float64(s.WiredTxBytesR),
|
||||
"wired_tx_packets": float64(s.WiredTxPackets),
|
||||
}
|
||||
|
||||
metricName := metricNamespace("clients")
|
||||
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
|
||||
// totalsDPImap: controller, site, name (app/cat name), dpi.
// Used to accumulate per-controller/site/name DPI transfer totals.
type totalsDPImap map[string]map[string]map[string]unifi.DPIData
|
||||
|
||||
func (u *DatadogUnifi) batchClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
|
||||
s, ok := v.(*unifi.DPITable)
|
||||
if !ok {
|
||||
u.LogErrorf("invalid type given to batchClientDPI: %T", v)
|
||||
return
|
||||
}
|
||||
|
||||
for _, dpi := range s.ByApp {
|
||||
category := unifi.DPICats.Get(dpi.Cat)
|
||||
application := unifi.DPIApps.GetApp(dpi.Cat, dpi.App)
|
||||
fillDPIMapTotals(appTotal, application, s.SourceName, s.SiteName, dpi)
|
||||
fillDPIMapTotals(catTotal, category, s.SourceName, s.SiteName, dpi)
|
||||
|
||||
tags := map[string]string{
|
||||
"category": category,
|
||||
"application": application,
|
||||
"name": s.Name,
|
||||
"mac": s.MAC,
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
}
|
||||
|
||||
data := map[string]float64{
|
||||
"tx_packets": float64(dpi.TxPackets),
|
||||
"rx_packets": float64(dpi.RxPackets),
|
||||
"tx_bytes": float64(dpi.TxBytes),
|
||||
"rx_bytes": float64(dpi.RxBytes),
|
||||
}
|
||||
|
||||
metricName := metricNamespace("client_dpi")
|
||||
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
}
|
||||
|
||||
// fillDPIMapTotals fills in totals for categories and applications. maybe clients too.
|
||||
// This allows less processing in Datadog to produce total transfer data per cat or app.
|
||||
func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
|
||||
if m[controller] == nil {
|
||||
m[controller] = make(map[string]map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
if m[controller][site] == nil {
|
||||
m[controller][site] = make(map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
existing := m[controller][site][name]
|
||||
existing.TxPackets += dpi.TxPackets
|
||||
existing.RxPackets += dpi.RxPackets
|
||||
existing.TxBytes += dpi.TxBytes
|
||||
existing.RxBytes += dpi.RxBytes
|
||||
m[controller][site][name] = existing
|
||||
}
|
||||
|
||||
func reportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
|
||||
type all []struct {
|
||||
kind string
|
||||
val totalsDPImap
|
||||
}
|
||||
|
||||
// This produces 7000+ metrics per site. Disabled for now.
|
||||
if appTotal != nil {
|
||||
appTotal = nil
|
||||
}
|
||||
|
||||
// This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
|
||||
a := all{
|
||||
// This produces 7000+ metrics per site. Disabled for now.
|
||||
{
|
||||
kind: "application",
|
||||
val: appTotal,
|
||||
},
|
||||
{
|
||||
kind: "category",
|
||||
val: catTotal,
|
||||
},
|
||||
}
|
||||
|
||||
for _, k := range a {
|
||||
for controller, s := range k.val {
|
||||
for site, c := range s {
|
||||
for name, m := range c {
|
||||
tags := map[string]string{
|
||||
"category": "TOTAL",
|
||||
"application": "TOTAL",
|
||||
"name": "TOTAL",
|
||||
"mac": "TOTAL",
|
||||
"site_name": site,
|
||||
"source": controller,
|
||||
}
|
||||
tags[k.kind] = name
|
||||
|
||||
data := map[string]float64{
|
||||
"tx_packets": float64(m.TxPackets),
|
||||
"rx_packets": float64(m.RxPackets),
|
||||
"tx_bytes": float64(m.TxBytes),
|
||||
"rx_bytes": float64(m.RxBytes),
|
||||
}
|
||||
|
||||
metricName := metricNamespace("client_dpi")
|
||||
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,357 @@
|
|||
// Package datadogunifi provides the methods to turn UniFi measurements into Datadog
|
||||
// data points with appropriate tags and fields.
|
||||
package datadogunifi
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"golift.io/cnfg"
|
||||
)
|
||||
|
||||
const (
	// defaultInterval is used when no polling interval is configured.
	defaultInterval = 30 * time.Second
	// minimumInterval is the floor applied to any configured interval.
	minimumInterval = 10 * time.Second
)
|
||||
|
||||
// Config defines the data needed to store metrics in Datadog.
type Config struct {
	// Required Config

	// Interval controls the collection and reporting interval.
	Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval,omitempty" yaml:"interval,omitempty"`

	// Save data for dead ports? ie. ports that are down or disabled.
	DeadPorts bool `json:"dead_ports,omitempty" toml:"dead_ports,omitempty" xml:"dead_ports,omitempty" yaml:"dead_ports,omitempty"`

	// Enable when true, enables this output plugin.
	Enable *bool `json:"enable" toml:"enable" xml:"enable,attr" yaml:"enable"`
	// Address determines how to talk to the Datadog agent.
	Address string `json:"address" toml:"address" xml:"address,attr" yaml:"address"`

	// Optional Statsd Options - mirrored from statsd.Options.

	// Namespace to prepend to all metrics, events and service checks name.
	Namespace *string `json:"namespace" toml:"namespace" xml:"namespace,attr" yaml:"namespace"`

	// Tags are global tags to be applied to every metrics, events and service checks.
	Tags []string `json:"tags" toml:"tags" xml:"tags,attr" yaml:"tags"`

	// MaxBytesPerPayload is the maximum number of bytes a single payload will contain.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 1432 for UDP and 8192 for UDS.
	MaxBytesPerPayload *int `json:"max_bytes_per_payload" toml:"max_bytes_per_payload" xml:"max_bytes_per_payload,attr" yaml:"max_bytes_per_payload"`

	// MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
	// This option can be set to `1` to create an unbuffered client.
	MaxMessagesPerPayload *int `json:"max_messages_per_payload" toml:"max_messages_per_payload" xml:"max_messages_per_payload,attr" yaml:"max_messages_per_payload"`

	// BufferPoolSize is the size of the pool of buffers in number of buffers.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
	BufferPoolSize *int `json:"buffer_pool_size" toml:"buffer_pool_size" xml:"buffer_pool_size,attr" yaml:"buffer_pool_size"`

	// BufferFlushInterval is the interval after which the current buffer will get flushed.
	BufferFlushInterval *cnfg.Duration `json:"buffer_flush_interval" toml:"buffer_flush_interval" xml:"buffer_flush_interval,attr" yaml:"buffer_flush_interval"`

	// BufferShardCount is the number of buffer "shards" that will be used.
	// Those shards allow the use of multiple buffers at the same time to reduce
	// lock contention.
	BufferShardCount *int `json:"buffer_shard_count" toml:"buffer_shard_count" xml:"buffer_shard_count,attr" yaml:"buffer_shard_count"`

	// SenderQueueSize is the size of the sender queue in number of buffers.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
	SenderQueueSize *int `json:"sender_queue_size" toml:"sender_queue_size" xml:"sender_queue_size,attr" yaml:"sender_queue_size"`

	// WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
	WriteTimeoutUDS *cnfg.Duration `json:"write_timeout_uds" toml:"write_timeout_uds" xml:"write_timeout_uds,attr" yaml:"write_timeout_uds"`

	// ReceiveMode determines the behavior of the client when receiving too many
	// metrics. The client will either drop the metrics if its buffers are
	// full (ChannelMode mode) or block the caller until the metric can be
	// handled (MutexMode mode). By default the client will use MutexMode. This
	// option should be set to ChannelMode only when used under very high
	// load.
	//
	// MutexMode uses a mutex internally which is much faster than
	// channel but causes some lock contention when used with a high number
	// of threads. Mutexes are sharded based on the metric name, which
	// limits mutex contention when goroutines send different metrics.
	//
	// ChannelMode: uses a channel (of ChannelModeBufferSize size) to send
	// metrics and drops metrics if the channel is full. Sending metrics in
	// this mode is slower than MutexMode (because of the channel), but
	// will not block the application. This mode is made for applications
	// using many goroutines, sending the same metrics at a very high
	// volume. The goal is to not slow down the application at the cost of
	// dropping metrics and having a lower max throughput.
	ReceiveMode *statsd.ReceivingMode `json:"receive_mode" toml:"receive_mode" xml:"receive_mode,attr" yaml:"receive_mode"`

	// ChannelModeBufferSize is the size of the channel holding incoming metrics.
	ChannelModeBufferSize *int `json:"channel_mode_buffer_size" toml:"channel_mode_buffer_size" xml:"channel_mode_buffer_size,attr" yaml:"channel_mode_buffer_size"`

	// AggregationFlushInterval is the interval for the aggregator to flush metrics.
	AggregationFlushInterval *time.Duration `json:"aggregation_flush_interval" toml:"aggregation_flush_interval" xml:"aggregation_flush_interval,attr" yaml:"aggregation_flush_interval"`
}
|
||||
|
||||
// Datadog allows the data to be context aware with configuration.
type Datadog struct {
	*Config `json:"datadog" toml:"datadog" xml:"datadog" yaml:"datadog"`
	// options is derived from Config by setConfigDefaults and passed to statsd.New.
	options []statsd.Option // nolint
}
|
||||
|
||||
// DatadogUnifi is returned by New() after you provide a Config.
type DatadogUnifi struct {
	// Collector fetches metrics and events from the poller core.
	Collector poller.Collect
	// datadog is the statsd client used to ship data to the agent.
	datadog statsd.ClientInterface
	// LastCheck records the time of the most recent poll tick.
	LastCheck time.Time
	*Datadog
}
|
||||
|
||||
func init() { // nolint: gochecknoinits
|
||||
u := &DatadogUnifi{Datadog: &Datadog{}, LastCheck: time.Now()}
|
||||
|
||||
poller.NewOutput(&poller.Output{
|
||||
Name: "datadog",
|
||||
Config: u.Datadog,
|
||||
Method: u.Run,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) setConfigDefaults() {
|
||||
if u.Interval.Duration == 0 {
|
||||
u.Interval = cnfg.Duration{Duration: defaultInterval}
|
||||
} else if u.Interval.Duration < minimumInterval {
|
||||
u.Interval = cnfg.Duration{Duration: minimumInterval}
|
||||
}
|
||||
|
||||
u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
|
||||
|
||||
u.options = make([]statsd.Option, 0)
|
||||
|
||||
if u.Namespace != nil {
|
||||
u.options = append(u.options, statsd.WithNamespace(*u.Namespace))
|
||||
}
|
||||
|
||||
if u.Tags != nil && len(u.Tags) > 0 {
|
||||
u.options = append(u.options, statsd.WithTags(u.Tags))
|
||||
}
|
||||
|
||||
if u.MaxBytesPerPayload != nil {
|
||||
u.options = append(u.options, statsd.WithMaxBytesPerPayload(*u.MaxBytesPerPayload))
|
||||
}
|
||||
|
||||
if u.MaxMessagesPerPayload != nil {
|
||||
u.options = append(u.options, statsd.WithMaxMessagesPerPayload(*u.MaxMessagesPerPayload))
|
||||
}
|
||||
|
||||
if u.BufferPoolSize != nil {
|
||||
u.options = append(u.options, statsd.WithBufferPoolSize(*u.BufferPoolSize))
|
||||
}
|
||||
|
||||
if u.BufferFlushInterval != nil {
|
||||
u.options = append(u.options, statsd.WithBufferFlushInterval((*u.BufferFlushInterval).Duration))
|
||||
}
|
||||
|
||||
if u.BufferShardCount != nil {
|
||||
u.options = append(u.options, statsd.WithBufferShardCount(*u.BufferShardCount))
|
||||
}
|
||||
|
||||
if u.SenderQueueSize != nil {
|
||||
u.options = append(u.options, statsd.WithSenderQueueSize(*u.SenderQueueSize))
|
||||
}
|
||||
|
||||
if u.WriteTimeoutUDS != nil {
|
||||
u.options = append(u.options, statsd.WithWriteTimeoutUDS((*u.WriteTimeoutUDS).Duration))
|
||||
}
|
||||
|
||||
if u.ReceiveMode != nil {
|
||||
switch *u.ReceiveMode {
|
||||
case statsd.ChannelMode:
|
||||
u.options = append(u.options, statsd.WithChannelMode())
|
||||
case statsd.MutexMode:
|
||||
u.options = append(u.options, statsd.WithMutexMode())
|
||||
}
|
||||
}
|
||||
|
||||
if u.ChannelModeBufferSize != nil {
|
||||
u.options = append(u.options, statsd.WithChannelModeBufferSize(*u.ChannelModeBufferSize))
|
||||
}
|
||||
|
||||
if u.AggregationFlushInterval != nil {
|
||||
u.options = append(u.options, statsd.WithAggregationInterval(*u.AggregationFlushInterval))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Run runs a ticker to poll the unifi server and update Datadog.
|
||||
func (u *DatadogUnifi) Run(c poller.Collect) error {
|
||||
disabled := u == nil || u.Enable == nil || !(*u.Enable) || u.Config == nil
|
||||
if disabled {
|
||||
u.LogDebugf("Datadog config is disabled, output is disabled.")
|
||||
return nil
|
||||
}
|
||||
u.Collector = c
|
||||
u.Logf("Datadog is configured.")
|
||||
u.setConfigDefaults()
|
||||
|
||||
var err error
|
||||
u.datadog, err = statsd.New(u.Address, u.options...)
|
||||
if err != nil {
|
||||
u.LogErrorf("Error configuration Datadog agent reporting: %+v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
u.PollController()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PollController runs forever, polling UniFi and pushing to Datadog
|
||||
// This is started by Run() or RunBoth() after everything is validated.
|
||||
func (u *DatadogUnifi) PollController() {
|
||||
interval := u.Interval.Round(time.Second)
|
||||
ticker := time.NewTicker(interval)
|
||||
u.Logf("Everything checks out! Poller started, interval=%+v", interval)
|
||||
|
||||
for u.LastCheck = range ticker.C {
|
||||
metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
|
||||
if err != nil {
|
||||
u.LogErrorf("metric fetch for Datadog failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
|
||||
if err != nil {
|
||||
u.LogErrorf("event fetch for Datadog failed", err)
|
||||
continue
|
||||
}
|
||||
|
||||
report, err := u.ReportMetrics(metrics, events)
|
||||
if err != nil {
|
||||
// Is the agent down?
|
||||
u.LogErrorf("unable to report metrics and events", err)
|
||||
_ = report.reportCount("unifi.collect.errors", 1, []string{})
|
||||
continue
|
||||
}
|
||||
_ = report.reportCount("unifi.collect.success", 1, []string{})
|
||||
u.LogDatadogReport(report)
|
||||
}
|
||||
}
|
||||
|
||||
// ReportMetrics batches all device and client data into datadog data points.
|
||||
// Call this after you've collected all the data you care about.
|
||||
// Returns an error if datadog statsd calls fail, otherwise returns a report.
|
||||
func (u *DatadogUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
|
||||
r := &Report{
|
||||
Metrics: m,
|
||||
Events: e,
|
||||
Start: time.Now(),
|
||||
Counts: &Counts{Val: make(map[item]int)},
|
||||
Collector: u.Collector,
|
||||
client: u.datadog,
|
||||
}
|
||||
// batch all the points.
|
||||
u.loopPoints(r)
|
||||
r.End = time.Now()
|
||||
r.Elapsed = r.End.Sub(r.Start)
|
||||
_ = r.reportTiming("unifi.collector_timing", r.Elapsed, []string{})
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// loopPoints collects all the data to immediately report to Datadog.
|
||||
func (u *DatadogUnifi) loopPoints(r report) {
|
||||
m := r.metrics()
|
||||
|
||||
for _, s := range m.RogueAPs {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.Sites {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.SitesDPI {
|
||||
u.reportSiteDPI(r, s.(*unifi.DPITable))
|
||||
}
|
||||
|
||||
for _, s := range m.Clients {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.Devices {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range r.events().Logs {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
appTotal := make(totalsDPImap)
|
||||
catTotal := make(totalsDPImap)
|
||||
|
||||
for _, s := range m.ClientsDPI {
|
||||
u.batchClientDPI(r, s, appTotal, catTotal)
|
||||
}
|
||||
|
||||
reportClientDPItotals(r, appTotal, catTotal)
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
|
||||
switch v := v.(type) {
|
||||
case *unifi.RogueAP:
|
||||
u.batchRogueAP(r, v)
|
||||
case *unifi.UAP:
|
||||
u.batchUAP(r, v)
|
||||
case *unifi.USW:
|
||||
u.batchUSW(r, v)
|
||||
case *unifi.USG:
|
||||
u.batchUSG(r, v)
|
||||
case *unifi.UXG:
|
||||
u.batchUXG(r, v)
|
||||
case *unifi.UDM:
|
||||
u.batchUDM(r, v)
|
||||
case *unifi.Site:
|
||||
u.reportSite(r, v)
|
||||
case *unifi.Client:
|
||||
u.batchClient(r, v)
|
||||
case *unifi.Event:
|
||||
u.batchEvent(r, v)
|
||||
case *unifi.IDS:
|
||||
u.batchIDS(r, v)
|
||||
case *unifi.Alarm:
|
||||
u.batchAlarms(r, v)
|
||||
case *unifi.Anomaly:
|
||||
u.batchAnomaly(r, v)
|
||||
default:
|
||||
u.LogErrorf("invalid export, type=%+v", reflect.TypeOf(v))
|
||||
}
|
||||
}
|
||||
|
||||
// LogDatadogReport writes a log message after exporting to Datadog.
|
||||
func (u *DatadogUnifi) LogDatadogReport(r *Report) {
|
||||
m := r.Metrics
|
||||
u.Logf("UniFi Metrics Recorded num_sites=%d num_sites_dpi=%d num_clients=%d num_clients_dpi=%d num_rogue_ap=%d num_devices=%d errors=%v elapsec=%v",
|
||||
len(m.Sites),
|
||||
len(m.SitesDPI),
|
||||
len(m.Clients),
|
||||
len(m.ClientsDPI),
|
||||
len(m.RogueAPs),
|
||||
len(m.Devices),
|
||||
r.Errors,
|
||||
r.Elapsed,
|
||||
)
|
||||
metricName := metricNamespace("collector")
|
||||
_ = r.reportCount(metricName("num_sites"), int64(len(m.Sites)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_sites_dpi"), int64(len(m.SitesDPI)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_clients"), int64(len(m.Clients)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_clients_dpi"), int64(len(m.ClientsDPI)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_rogue_ap"), int64(len(m.RogueAPs)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_devices"), int64(len(m.Devices)), u.Tags)
|
||||
_ = r.reportCount(metricName("num_errors"), int64(len(r.Errors)), u.Tags)
|
||||
_ = r.reportTiming(metricName("elapsed_time"), r.Elapsed, u.Tags)
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// These constants are used as names for printed/logged counters.
const (
	eventT = item("Event")
	idsT   = item("IDS")
)
|
||||
|
||||
// batchIDS generates intrusion detection datapoints for Datadog.
|
||||
func (u *DatadogUnifi) batchIDS(r report, i *unifi.IDS) { // nolint:dupl
|
||||
if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
|
||||
return // The event is older than our interval, ignore it.
|
||||
}
|
||||
|
||||
tagMap := map[string]string{
|
||||
"dest_port": strconv.Itoa(i.DestPort),
|
||||
"src_port": strconv.Itoa(i.SrcPort),
|
||||
"dest_ip": i.DestIP,
|
||||
"dst_mac": i.DstMAC,
|
||||
"host": i.Host,
|
||||
"msg": i.Msg,
|
||||
"src_ip": i.SrcIP,
|
||||
"src_mac": i.SrcMAC,
|
||||
"dst_ip_asn": fmt.Sprintf("%d", i.DestIPGeo.Asn),
|
||||
"dst_ip_latitude": fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
|
||||
"dst_ip_longitude": fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
|
||||
"dst_ip_city": i.DestIPGeo.City,
|
||||
"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
|
||||
"dst_ip_country_code": i.DestIPGeo.CountryCode,
|
||||
"dst_ip_country_name": i.DestIPGeo.CountryName,
|
||||
"dst_ip_organization": i.DestIPGeo.Organization,
|
||||
"src_ip_asn": fmt.Sprintf("%d", i.SourceIPGeo.Asn),
|
||||
"src_ip_latitude": fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
|
||||
"src_ip_longitude": fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
|
||||
"src_ip_city": i.SourceIPGeo.City,
|
||||
"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
|
||||
"src_ip_country_code": i.SourceIPGeo.CountryCode,
|
||||
"src_ip_country_name": i.SourceIPGeo.CountryName,
|
||||
"src_ip_organization": i.SourceIPGeo.Organization,
|
||||
"site_name": i.SiteName,
|
||||
"source": i.SourceName,
|
||||
"in_iface": i.InIface,
|
||||
"event_type": i.EventType,
|
||||
"subsystem": i.Subsystem,
|
||||
"archived": i.Archived.Txt,
|
||||
"usg_ip": i.USGIP,
|
||||
"proto": i.Proto,
|
||||
"key": i.Key,
|
||||
"catname": i.Catname,
|
||||
"app_proto": i.AppProto,
|
||||
"action": i.InnerAlertAction,
|
||||
}
|
||||
|
||||
r.addCount(idsT)
|
||||
|
||||
tagMap = cleanTags(tagMap)
|
||||
tags := tagMapToTags(tagMap)
|
||||
title := fmt.Sprintf("Intrusion Detection at %s from %s", i.SiteName, i.SourceName)
|
||||
_ = r.reportEvent(title, i.Datetime, i.Msg, tags)
|
||||
r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
|
||||
}
|
||||
|
||||
// batchEvent generates one event from UniFi for Datadog.
// Events older than one collection interval (plus a second of slack) are
// dropped so that a restart does not replay old history into Datadog.
func (u *DatadogUnifi) batchEvent(r report, i *unifi.Event) { // nolint: funlen
	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Every potentially-interesting event attribute becomes a tag; empty
	// values are stripped by cleanTags below.
	tagMap := map[string]string{
		"guest":                 i.Guest, // mac address
		"user":                  i.User,  // mac address
		"host":                  i.Host,  // usg device?
		"hostname":              i.Hostname, // client name
		"dest_port":             strconv.Itoa(i.DestPort),
		"src_port":              strconv.Itoa(i.SrcPort),
		"dst_ip":                i.DestIP,
		"dst_mac":               i.DstMAC,
		"ip":                    i.IP,
		"src_ip":                i.SrcIP,
		"src_mac":               i.SrcMAC,
		"dst_ip_asn":            fmt.Sprintf("%d", i.DestIPGeo.Asn),
		"dst_ip_latitude":       fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
		"dst_ip_longitude":      fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
		"dst_ip_city":           i.DestIPGeo.City,
		"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
		"dst_ip_country_code":   i.DestIPGeo.CountryCode,
		"dst_ip_country_name":   i.DestIPGeo.CountryName,
		"dst_ip_organization":   i.DestIPGeo.Organization,
		"src_ip_asn":            fmt.Sprintf("%d", i.SourceIPGeo.Asn),
		"src_ip_latitude":       fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
		"src_ip_longitude":      fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
		"src_ip_city":           i.SourceIPGeo.City,
		"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
		"src_ip_country_code":   i.SourceIPGeo.CountryCode,
		"src_ip_country_name":   i.SourceIPGeo.CountryName,
		"src_ip_organization":   i.SourceIPGeo.Organization,
		"admin":                 i.Admin, // username
		"site_name":             i.SiteName,
		"source":                i.SourceName,
		"ap_from":               i.ApFrom,
		"ap_to":                 i.ApTo,
		"ap":                    i.Ap,
		"ap_name":               i.ApName,
		"gw":                    i.Gw,
		"gw_name":               i.GwName,
		"sw":                    i.Sw,
		"sw_name":               i.SwName,
		"catname":               i.Catname,
		"radio":                 i.Radio,
		"radio_from":            i.RadioFrom,
		"radio_to":              i.RadioTo,
		"key":                   i.Key,
		"in_iface":              i.InIface,
		"event_type":            i.EventType,
		"subsystem":             i.Subsystem,
		"ssid":                  i.SSID,
		"is_admin":              i.IsAdmin.Txt,
		"channel":               i.Channel.Txt,
		"channel_from":          i.ChannelFrom.Txt,
		"channel_to":            i.ChannelTo.Txt,
		"usg_ip":                i.USGIP,
		"network":               i.Network,
		"app_proto":             i.AppProto,
		"proto":                 i.Proto,
		"action":                i.InnerAlertAction,
	}

	r.addCount(eventT)

	tagMap = cleanTags(tagMap)
	tags := tagMapToTags(tagMap)
	title := fmt.Sprintf("Unifi Event at %s from %s", i.SiteName, i.SourceName)
	// Submission errors are intentionally ignored, matching the rest of this package.
	_ = r.reportEvent(title, i.Datetime, i.Msg, tags)
	r.reportInfoLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
package datadogunifi
|
||||
|
||||
// Logf logs a message.
|
||||
func (u *DatadogUnifi) Logf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.Logf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (u *DatadogUnifi) LogErrorf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.LogErrorf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (u *DatadogUnifi) LogDebugf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.LogDebugf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// tag renders a single Datadog tag in "name:value" form.
func tag(name string, value interface{}) string {
	return name + ":" + fmt.Sprint(value)
}
|
||||
|
||||
// tagMapToTags converts a map of tag names to values into Datadog's
// "name:value" tag-slice form.
//
// Improvements over the original: the slice is pre-sized to len(tagMap) to
// avoid repeated growth, and the all-string values are concatenated directly
// instead of being boxed through fmt. Ordering follows map iteration and is
// therefore not deterministic (unchanged from the original).
func tagMapToTags(tagMap map[string]string) []string {
	tags := make([]string, 0, len(tagMap))
	for k, v := range tagMap {
		tags = append(tags, k+":"+v)
	}

	return tags
}
|
||||
|
||||
// tagMapToSimpleStrings renders a tag map as `key="value", key2="value2"` for
// human-readable log lines. Map iteration order is not deterministic, so
// neither is the output ordering.
func tagMapToSimpleStrings(tagMap map[string]string) string {
	var b strings.Builder

	for k, v := range tagMap {
		fmt.Fprintf(&b, "%s=\"%v\", ", k, v)
	}

	return strings.TrimRight(b.String(), ", ")
}
|
||||
|
||||
// metricNamespace returns a naming function that prefixes metric names with
// "unifi.<namespace>.".
func metricNamespace(namespace string) func(string) string {
	prefix := "unifi." + namespace + "."

	return func(name string) string {
		return prefix + name
	}
}
|
||||
|
||||
func reportGaugeForFloat64Map(r report, metricName func(string) string, data map[string]float64, tags map[string]string) {
|
||||
for name, value := range data {
|
||||
_ = r.reportGauge(metricName(name), value, tagMapToTags(tags))
|
||||
}
|
||||
}
|
||||
|
||||
// cleanTags deletes every entry whose value is the empty string and returns
// the same (mutated) map for call-chaining convenience.
func cleanTags(tags map[string]string) map[string]string {
	for key, value := range tags {
		if value == "" {
			delete(tags, key)
		}
	}

	return tags
}
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
// Report holds the state for one collection/export run: the metrics and
// events gathered from the poller, errors raised while exporting, run timing,
// and the statsd client used to emit data to Datadog.
type Report struct {
	Metrics *poller.Metrics // Metrics snapshot for this run.
	Events  *poller.Events  // Log-event snapshot for this run.
	Errors  []error         // Errors accumulated via error().
	Counts  *Counts         // Per-item datapoint counters; see addCount.
	Start   time.Time       // When this run started.
	End     time.Time       // When this run finished.
	Elapsed time.Duration   // Run duration.

	Collector poller.Collect // Logger used by reportInfoLog/reportWarnLog.

	// Totals kept for summary output; exact use is outside this file —
	// TODO(review): confirm against the caller.
	Total  int
	Fields int

	wg sync.WaitGroup // Tracks in-flight work registered via add()/done().

	client statsd.ClientInterface // Destination for all metrics and events.
}
|
||||
|
||||
// Counts holds per-item datapoint counters and embeds a lock so concurrent
// goroutines can update them safely (see Report.addCount).
type Counts struct {
	Val map[item]int // Counter value per item name.
	sync.RWMutex
}
|
||||
|
||||
// report is the interface the batch/report functions use to accumulate run
// state and emit data to Datadog. *Report implements it.
type report interface {
	add()                     // Register one unit of in-flight work.
	done()                    // Mark one unit of in-flight work finished.
	error(err error)          // Record a non-nil error.
	metrics() *poller.Metrics // Metrics snapshot for this run.
	events() *poller.Events   // Event snapshot for this run.
	addCount(item, ...int)    // Bump a named datapoint counter.

	// Emitters; each returns the underlying statsd client error.
	reportGauge(name string, value float64, tags []string) error
	reportCount(name string, value int64, tags []string) error
	reportDistribution(name string, value float64, tags []string) error
	reportTiming(name string, value time.Duration, tags []string) error
	reportEvent(title string, date time.Time, message string, tags []string) error
	reportInfoLog(message string, f ...interface{})
	reportWarnLog(message string, f ...interface{})
	reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error
}
|
||||
|
||||
// add registers one unit of in-flight work with the report's wait group.
func (r *Report) add() {
	r.wg.Add(1)
}
|
||||
|
||||
// done marks one unit of in-flight work (registered via add) as finished.
func (r *Report) done() {
	r.wg.Done()
}
|
||||
|
||||
// metrics returns the metrics snapshot for this run.
func (r *Report) metrics() *poller.Metrics {
	return r.Metrics
}
|
||||
|
||||
// events returns the event snapshot for this run.
func (r *Report) events() *poller.Events {
	return r.Events
}
|
||||
|
||||
/* The following methods are not thread safe. */
|
||||
|
||||
type item string
|
||||
|
||||
func (r *Report) addCount(name item, counts ...int) {
|
||||
r.Counts.Lock()
|
||||
defer r.Counts.Unlock()
|
||||
|
||||
if len(counts) == 0 {
|
||||
r.Counts.Val[name]++
|
||||
}
|
||||
|
||||
for _, c := range counts {
|
||||
r.Counts.Val[name] += c
|
||||
}
|
||||
}
|
||||
|
||||
// error appends a non-nil error to the report; nil errors are ignored.
func (r *Report) error(err error) {
	if err != nil {
		r.Errors = append(r.Errors, err)
	}
}
|
||||
|
||||
// reportGauge submits a gauge metric at full sample rate (1.0).
func (r *Report) reportGauge(name string, value float64, tags []string) error {
	return r.client.Gauge(name, value, tags, 1.0)
}
|
||||
|
||||
// reportCount submits a count metric at full sample rate (1.0).
func (r *Report) reportCount(name string, value int64, tags []string) error {
	return r.client.Count(name, value, tags, 1.0)
}
|
||||
|
||||
// reportDistribution submits a distribution metric at full sample rate (1.0).
func (r *Report) reportDistribution(name string, value float64, tags []string) error {
	return r.client.Distribution(name, value, tags, 1.0)
}
|
||||
|
||||
// reportTiming submits a timing metric at full sample rate (1.0).
func (r *Report) reportTiming(name string, value time.Duration, tags []string) error {
	return r.client.Timing(name, value, tags, 1.0)
}
|
||||
|
||||
func (r *Report) reportEvent(title string, date time.Time, message string, tags []string) error {
|
||||
if date.IsZero() {
|
||||
date = time.Now()
|
||||
}
|
||||
return r.client.Event(&statsd.Event{
|
||||
Title: title,
|
||||
Text: message,
|
||||
Timestamp: date,
|
||||
Tags: tags,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Report) reportInfoLog(message string, f ...interface{}) {
|
||||
r.Collector.Logf(message, f)
|
||||
}
|
||||
|
||||
func (r *Report) reportWarnLog(message string, f ...interface{}) {
|
||||
r.Collector.Logf(message, f)
|
||||
}
|
||||
|
||||
// reportServiceCheck submits a service check with the current time as its
// timestamp.
func (r *Report) reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error {
	return r.client.ServiceCheck(&statsd.ServiceCheck{
		Name:      name,
		Status:    status,
		Timestamp: time.Now(),
		Message:   message,
		Tags:      tags,
	})
}
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// reportSite generates Unifi Sites' datapoints for Datadog.
// One set of gauges is emitted per health subsystem of the site; all of them
// share the "unifi.subsystems" metric namespace.
func (u *DatadogUnifi) reportSite(r report, s *unifi.Site) {
	metricName := metricNamespace("subsystems")

	for _, h := range s.Health {
		// Tags combine site identity with this subsystem's health identity.
		tags := []string{
			tag("name", s.Name),
			tag("site_name", s.SiteName),
			tag("source", s.SourceName),
			tag("desc", s.Desc),
			tag("status", h.Status),
			tag("subsystem", h.Subsystem),
			tag("wan_ip", h.WanIP),
			tag("gw_name", h.GwName),
			tag("lan_ip", h.LanIP),
		}

		data := map[string]float64{
			"num_user":                 h.NumUser.Val,
			"num_guest":                h.NumGuest.Val,
			"num_iot":                  h.NumIot.Val,
			"tx_bytes_r":               h.TxBytesR.Val,
			"rx_bytes_r":               h.RxBytesR.Val,
			"num_ap":                   h.NumAp.Val,
			"num_adopted":              h.NumAdopted.Val,
			"num_disabled":             h.NumDisabled.Val,
			"num_disconnected":         h.NumDisconnected.Val,
			"num_pending":              h.NumPending.Val,
			"num_gw":                   h.NumGw.Val,
			"num_sta":                  h.NumSta.Val,
			"gw_cpu":                   h.GwSystemStats.CPU.Val,
			"gw_mem":                   h.GwSystemStats.Mem.Val,
			"gw_uptime":                h.GwSystemStats.Uptime.Val,
			"latency":                  h.Latency.Val,
			"uptime":                   h.Uptime.Val,
			"drops":                    h.Drops.Val,
			"xput_up":                  h.XputUp.Val,
			"xput_down":                h.XputDown.Val,
			"speedtest_ping":           h.SpeedtestPing.Val,
			"speedtest_lastrun":        h.SpeedtestLastrun.Val,
			"num_sw":                   h.NumSw.Val,
			"remote_user_num_active":   h.RemoteUserNumActive.Val,
			"remote_user_num_inactive": h.RemoteUserNumInactive.Val,
			"remote_user_rx_bytes":     h.RemoteUserRxBytes.Val,
			"remote_user_tx_bytes":     h.RemoteUserTxBytes.Val,
			"remote_user_rx_packets":   h.RemoteUserRxPackets.Val,
			"remote_user_tx_packets":   h.RemoteUserTxPackets.Val,
			// num_new_alarms is site-wide (from s), repeated per subsystem.
			"num_new_alarms": s.NumNewAlarms.Val,
		}

		for name, value := range data {
			_ = r.reportGauge(metricName(name), value, tags)
		}
	}
}
|
||||
|
||||
func (u *DatadogUnifi) reportSiteDPI(r report, s *unifi.DPITable) {
|
||||
for _, dpi := range s.ByApp {
|
||||
metricName := metricNamespace("sitedpi")
|
||||
|
||||
tags := []string{
|
||||
tag("category", unifi.DPICats.Get(dpi.Cat)),
|
||||
tag("application", unifi.DPIApps.GetApp(dpi.Cat, dpi.App)),
|
||||
tag("site_name", s.SiteName),
|
||||
tag("source", s.SourceName),
|
||||
}
|
||||
|
||||
_ = r.reportCount(metricName("tx_packets"), dpi.TxPackets, tags)
|
||||
_ = r.reportCount(metricName("rx_packets"), dpi.RxPackets, tags)
|
||||
_ = r.reportCount(metricName("tx_bytes"), dpi.TxBytes, tags)
|
||||
_ = r.reportCount(metricName("rx_bytes"), dpi.RxBytes, tags)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,235 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uapT is the counter name used for wireless access point datapoints.
const uapT = item("UAP")
|
||||
|
||||
// batchRogueAP generates metric points for neighboring (rogue) access points
// under the "unifi.uap_rogue" namespace.
func (u *DatadogUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
	if s.Age.Val == 0 {
		return // only keep metrics for things that are recent.
	}

	tags := cleanTags(map[string]string{
		"security":   s.Security,
		"oui":        s.Oui,
		"band":       s.Band,
		"mac":        s.Bssid, // the rogue AP's BSSID is tagged as "mac".
		"ap_mac":     s.ApMac,
		"radio":      s.Radio,
		"radio_name": s.RadioName,
		"site_name":  s.SiteName,
		"name":       s.Essid, // the SSID is tagged as "name".
		"source":     s.SourceName,
	})

	data := map[string]float64{
		"age":         s.Age.Val,
		"bw":          s.Bw.Val,
		"center_freq": s.CenterFreq.Val,
		"channel":     float64(s.Channel),
		"freq":        s.Freq.Val,
		"noise":       s.Noise.Val,
		"rssi":        s.Rssi.Val,
		"rssi_age":    s.RssiAge.Val,
		"signal":      s.Signal.Val,
	}

	metricName := metricNamespace("uap_rogue")

	reportGaugeForFloat64Map(r, metricName, data, tags)
}
|
||||
|
||||
// batchUAP generates Wireless-Access-Point datapoints for Datadog.
// It combines accumulated AP stats with system stats, emits them under the
// "unifi.uap" namespace, then fans out to the VAP and port tables.
func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
	if !s.Adopted.Val || s.Locating.Val {
		return // Skip devices that are not adopted or are being located.
	}

	tags := cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data := CombineFloat64(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
	data["bytes"] = s.Bytes.Val
	data["last_seen"] = s.LastSeen.Val
	data["rx_bytes"] = s.RxBytes.Val
	data["tx_bytes"] = s.TxBytes.Val
	data["uptime"] = s.Uptime.Val
	data["user_num_sta"] = s.UserNumSta.Val
	data["guest_num_sta"] = s.GuestNumSta.Val
	data["num_sta"] = s.NumSta.Val

	r.addCount(uapT)

	metricName := metricNamespace("uap")

	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.processVAPTable(r, tags, s.VapTable)
	u.batchPortTable(r, tags, s.PortTable)
}
|
||||
|
||||
// processUAPstats flattens an access point's accumulated traffic counters
// into a metric map. Returns an empty map when the AP stat block is absent.
func (u *DatadogUnifi) processUAPstats(ap *unifi.Ap) map[string]float64 {
	if ap == nil {
		return map[string]float64{}
	}

	// Accumulative Statistics.
	return map[string]float64{
		"stat_user-rx_packets":  ap.UserRxPackets.Val,
		"stat_guest-rx_packets": ap.GuestRxPackets.Val,
		"stat_rx_packets":       ap.RxPackets.Val,
		"stat_user-rx_bytes":    ap.UserRxBytes.Val,
		"stat_guest-rx_bytes":   ap.GuestRxBytes.Val,
		"stat_rx_bytes":         ap.RxBytes.Val,
		"stat_user-rx_errors":   ap.UserRxErrors.Val,
		"stat_guest-rx_errors":  ap.GuestRxErrors.Val,
		"stat_rx_errors":        ap.RxErrors.Val,
		"stat_user-rx_dropped":  ap.UserRxDropped.Val,
		"stat_guest-rx_dropped": ap.GuestRxDropped.Val,
		"stat_rx_dropped":       ap.RxDropped.Val,
		"stat_user-rx_crypts":   ap.UserRxCrypts.Val,
		"stat_guest-rx_crypts":  ap.GuestRxCrypts.Val,
		"stat_rx_crypts":        ap.RxCrypts.Val,
		"stat_user-rx_frags":    ap.UserRxFrags.Val,
		"stat_guest-rx_frags":   ap.GuestRxFrags.Val,
		"stat_rx_frags":         ap.RxFrags.Val,
		"stat_user-tx_packets":  ap.UserTxPackets.Val,
		"stat_guest-tx_packets": ap.GuestTxPackets.Val,
		"stat_tx_packets":       ap.TxPackets.Val,
		"stat_user-tx_bytes":    ap.UserTxBytes.Val,
		"stat_guest-tx_bytes":   ap.GuestTxBytes.Val,
		"stat_tx_bytes":         ap.TxBytes.Val,
		"stat_user-tx_errors":   ap.UserTxErrors.Val,
		"stat_guest-tx_errors":  ap.GuestTxErrors.Val,
		"stat_tx_errors":        ap.TxErrors.Val,
		"stat_user-tx_dropped":  ap.UserTxDropped.Val,
		"stat_guest-tx_dropped": ap.GuestTxDropped.Val,
		"stat_tx_dropped":       ap.TxDropped.Val,
		"stat_user-tx_retries":  ap.UserTxRetries.Val,
		"stat_guest-tx_retries": ap.GuestTxRetries.Val,
		// NOTE(review): no aggregate "stat_tx_retries" entry exists, unlike
		// the other stat families — confirm whether that is intentional.
	}
}
|
||||
|
||||
// processVAPTable creates points for Wifi Radios. This works with several types of UAP-capable devices.
// Each virtual AP (SSID/radio pairing) produces one set of gauges under the
// "unifi.uap_vaps" namespace; t carries device-level tags from the caller.
func (u *DatadogUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) { // nolint: funlen
	for _, s := range vt {
		tags := map[string]string{
			"device_name": t["name"],
			"site_name":   t["site_name"],
			"source":      t["source"],
			"ap_mac":      s.ApMac,
			"bssid":       s.Bssid,
			"id":          s.ID,
			"name":        s.Name,
			"radio_name":  s.RadioName,
			"radio":       s.Radio,
			"essid":       s.Essid,
			"site_id":     s.SiteID,
			"usage":       s.Usage,
			"state":       s.State,
			"is_guest":    s.IsGuest.Txt,
		}
		data := map[string]float64{
			"ccq":                   float64(s.Ccq),
			"mac_filter_rejections": float64(s.MacFilterRejections),
			"num_satisfaction_sta":  s.NumSatisfactionSta.Val,
			"avg_client_signal":     s.AvgClientSignal.Val,
			"satisfaction":          s.Satisfaction.Val,
			"satisfaction_now":      s.SatisfactionNow.Val,
			"num_sta":               float64(s.NumSta),
			"channel":               s.Channel.Val,
			"rx_bytes":              s.RxBytes.Val,
			"rx_crypts":             s.RxCrypts.Val,
			"rx_dropped":            s.RxDropped.Val,
			"rx_errors":             s.RxErrors.Val,
			"rx_frags":              s.RxFrags.Val,
			"rx_nwids":              s.RxNwids.Val,
			"rx_packets":            s.RxPackets.Val,
			"tx_bytes":              s.TxBytes.Val,
			"tx_dropped":            s.TxDropped.Val,
			"tx_errors":             s.TxErrors.Val,
			"tx_packets":            s.TxPackets.Val,
			"tx_power":              s.TxPower.Val,
			"tx_retries":            s.TxRetries.Val,
			"tx_combined_retries":   s.TxCombinedRetries.Val,
			"tx_data_mpdu_bytes":    s.TxDataMpduBytes.Val,
			"tx_rts_retries":        s.TxRtsRetries.Val,
			"tx_success":            s.TxSuccess.Val,
			"tx_total":              s.TxTotal.Val,
			"tx_tcp_goodbytes":      s.TxTCPStats.Goodbytes.Val,
			"tx_tcp_lat_avg":        s.TxTCPStats.LatAvg.Val,
			"tx_tcp_lat_max":        s.TxTCPStats.LatMax.Val,
			"tx_tcp_lat_min":        s.TxTCPStats.LatMin.Val,
			"rx_tcp_goodbytes":      s.RxTCPStats.Goodbytes.Val,
			"rx_tcp_lat_avg":        s.RxTCPStats.LatAvg.Val,
			"rx_tcp_lat_max":        s.RxTCPStats.LatMax.Val,
			"rx_tcp_lat_min":        s.RxTCPStats.LatMin.Val,
			"wifi_tx_latency_mov_avg":   s.WifiTxLatencyMov.Avg.Val,
			"wifi_tx_latency_mov_max":   s.WifiTxLatencyMov.Max.Val,
			"wifi_tx_latency_mov_min":   s.WifiTxLatencyMov.Min.Val,
			"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
			// NOTE(review): "cuont" is a typo for "count", but fixing it
			// renames the emitted metric — confirm downstream dashboards
			// before correcting.
			"wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
		}

		metricName := metricNamespace("uap_vaps")

		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
||||
// processRadTable emits per-radio gauges ("unifi.uap_radios") for a device.
// Static radio configuration comes from rt; live counters are merged in from
// the matching (by name) entry of rts. t carries device-level tags.
func (u *DatadogUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
	for _, p := range rt {
		tags := map[string]string{
			"device_name": t["name"],
			"site_name":   t["site_name"],
			"source":      t["source"],
			"channel":     p.Channel.Txt,
			"radio":       p.Radio,
			"ht":          p.Ht.Txt,
		}
		data := map[string]float64{
			"current_antenna_gain": p.CurrentAntennaGain.Val,
			"max_txpower":          p.MaxTxpower.Val,
			"min_txpower":          p.MinTxpower.Val,
			"nss":                  p.Nss.Val,
			"radio_caps":           p.RadioCaps.Val,
		}

		// Merge live stats from the radio-stats table entry with the same
		// name. NOTE(review): this loop variable `t` shadows the `t` tags
		// parameter — correct here, but fragile; consider renaming.
		for _, t := range rts {
			if t.Name == p.Name {
				data["ast_be_xmit"] = t.AstBeXmit.Val
				data["channel"] = t.Channel.Val
				data["cu_self_rx"] = t.CuSelfRx.Val
				data["cu_self_tx"] = t.CuSelfTx.Val
				data["cu_total"] = t.CuTotal.Val
				data["ext_channel"] = t.Extchannel.Val
				data["gain"] = t.Gain.Val
				data["guest_num_sta"] = t.GuestNumSta.Val
				data["num_sta"] = t.NumSta.Val
				data["tx_packets"] = t.TxPackets.Val
				data["tx_power"] = t.TxPower.Val
				data["tx_retries"] = t.TxRetries.Val
				data["user_num_sta"] = t.UserNumSta.Val

				break
			}
		}

		metricName := metricNamespace("uap_radios")

		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
|
@ -0,0 +1,196 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// udmT is the counter name used for UniFi Dream Machine datapoints.
const udmT = item("UDM")
|
||||
|
||||
// Combine merges any number of maps into a single new map. Later inputs win
// on duplicate keys, so identical keys across maps silently overwrite earlier
// values — use with caution.
func Combine(in ...map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})

	for _, m := range in {
		for k, v := range m {
			out[k] = v
		}
	}

	return out
}
|
||||
|
||||
// CombineFloat64 merges any number of float64 maps into a single new map.
// Later inputs win on duplicate keys, so identical keys across maps silently
// overwrite earlier values — use with caution.
func CombineFloat64(in ...map[string]float64) map[string]float64 {
	out := make(map[string]float64)

	for _, m := range in {
		for k, v := range m {
			out[k] = v
		}
	}

	return out
}
|
||||
|
||||
// batchSysStats is used by all device types.
|
||||
func (u *DatadogUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]float64 {
|
||||
m := map[string]float64{
|
||||
"loadavg_1": s.Loadavg1.Val,
|
||||
"loadavg_5": s.Loadavg5.Val,
|
||||
"loadavg_15": s.Loadavg15.Val,
|
||||
"mem_used": s.MemUsed.Val,
|
||||
"mem_buffer": s.MemBuffer.Val,
|
||||
"mem_total": s.MemTotal.Val,
|
||||
"cpu": ss.CPU.Val,
|
||||
"mem": ss.Mem.Val,
|
||||
"system_uptime": ss.Uptime.Val,
|
||||
}
|
||||
|
||||
for k, v := range ss.Temps {
|
||||
temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
|
||||
k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
|
||||
|
||||
if temp != 0 && k != "" {
|
||||
m["temp_"+strings.ToLower(k)] = float64(temp)
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]float64 {
|
||||
output := make(map[string]float64)
|
||||
|
||||
for _, t := range temps {
|
||||
output["temp_"+t.Name] = t.Value
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]float64 {
|
||||
output := make(map[string]float64)
|
||||
|
||||
for _, t := range storage {
|
||||
output["storage_"+t.Name+"_size"] = t.Size.Val
|
||||
output["storage_"+t.Name+"_used"] = t.Used.Val
|
||||
|
||||
if t.Size.Val != 0 && t.Used.Val != 0 && t.Used.Val < t.Size.Val {
|
||||
output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
|
||||
} else {
|
||||
output["storage_"+t.Name+"_pct"] = 0
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// batchUDM generates UniFi Dream Machine datapoints for Datadog.
// A UDM bundles a gateway, a switch, and (on non-Pro models) an access point,
// so its data is reported in three passes under the same namespaces used by
// the standalone device types: "usg", "usw", and "uap".
func (u *DatadogUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
	if !s.Adopted.Val || s.Locating.Val {
		return // Skip devices that are not adopted or are being located.
	}

	// Pass 1: gateway-level metrics ("usg" namespace).
	tags := cleanTags(map[string]string{
		"source":        s.SourceName,
		"mac":           s.Mac,
		"site_name":     s.SiteName,
		"name":          s.Name,
		"version":       s.Version,
		"model":         s.Model,
		"serial":        s.Serial,
		"type":          s.Type,
		"ip":            s.IP,
		"license_state": s.LicenseState,
	})
	data := CombineFloat64(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]float64{
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"guest_num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user_num_sta":  s.UserNumSta.Val,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(udmT)
	metricName := metricNamespace("usg")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Pass 2: built-in switch metrics ("usw" namespace).
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = CombineFloat64(
		u.batchUSWstat(s.Stat.Sw),
		map[string]float64{
			"guest_num_sta": s.GuestNumSta.Val,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	metricName = metricNamespace("usw")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.

	if s.Stat.Ap == nil {
		return // we're done now. the following code process UDM (non-pro) UAP data.
	}

	// Pass 3: built-in access point metrics ("uap" namespace).
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = u.processUAPstats(s.Stat.Ap)
	data["bytes"] = s.Bytes.Val
	data["last_seen"] = s.LastSeen.Val
	data["rx_bytes"] = s.RxBytes.Val
	data["tx_bytes"] = s.TxBytes.Val
	data["uptime"] = s.Uptime.Val
	data["state"] = s.State.Val
	data["user_num_sta"] = s.UserNumSta.Val
	data["guest_num_sta"] = s.GuestNumSta.Val
	data["num_sta"] = s.NumSta.Val

	metricName = metricNamespace("uap")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	// NOTE(review): RadioTable, RadioTableStats, and VapTable are
	// dereferenced without nil checks; presumably they are always non-nil
	// whenever Stat.Ap is set — confirm against the unifi library.
	u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
	u.processVAPTable(r, tags, *s.VapTable)
}
|
||||
|
|
@ -0,0 +1,155 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// usgT is the counter name used for UniFi Security Gateway datapoints.
const usgT = item("USG")
|
||||
|
||||
// batchUSG generates Unifi Gateway datapoints for Datadog.
|
||||
// These points can be passed directly to datadog.
|
||||
func (u *DatadogUnifi) batchUSG(r report, s *unifi.USG) {
|
||||
if !s.Adopted.Val || s.Locating.Val {
|
||||
return
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"mac": s.Mac,
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
"name": s.Name,
|
||||
"version": s.Version,
|
||||
"model": s.Model,
|
||||
"serial": s.Serial,
|
||||
"type": s.Type,
|
||||
"ip": s.IP,
|
||||
"license_state": s.LicenseState,
|
||||
}
|
||||
data := CombineFloat64(
|
||||
u.batchUDMtemps(s.Temperatures),
|
||||
u.batchSysStats(s.SysStats, s.SystemStats),
|
||||
u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
|
||||
map[string]float64{
|
||||
"bytes": s.Bytes.Val,
|
||||
"last_seen": s.LastSeen.Val,
|
||||
"guest_num_sta": s.GuestNumSta.Val,
|
||||
"rx_bytes": s.RxBytes.Val,
|
||||
"tx_bytes": s.TxBytes.Val,
|
||||
"uptime": s.Uptime.Val,
|
||||
"state": s.State.Val,
|
||||
"user_num_sta": s.UserNumSta.Val,
|
||||
"num_desktop": s.NumDesktop.Val,
|
||||
"num_handheld": s.NumHandheld.Val,
|
||||
"num_mobile": s.NumMobile.Val,
|
||||
},
|
||||
)
|
||||
|
||||
r.addCount(usgT)
|
||||
|
||||
metricName := metricNamespace("usg")
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
|
||||
u.batchNetTable(r, tags, s.NetworkTable)
|
||||
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
|
||||
}
|
||||
|
||||
// batchUSGstats flattens gateway uplink, speedtest, and LAN counters into a
// metric map. Returns an empty map when the gateway stat block is absent.
func (u *DatadogUnifi) batchUSGstats(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]float64 {
	if gw == nil {
		return map[string]float64{}
	}

	return map[string]float64{
		"uplink_latency":                 ul.Latency.Val,
		"uplink_speed":                   ul.Speed.Val,
		"speedtest_status_latency":       ss.Latency.Val,
		"speedtest_status_runtime":       ss.Runtime.Val,
		"speedtest_status_rundate":       ss.Rundate.Val,
		"speedtest_status_ping":          ss.StatusPing.Val,
		"speedtest_status_xput_download": ss.XputDownload.Val,
		"speedtest_status_xput_upload":   ss.XputUpload.Val,
		"lan_rx_bytes":                   gw.LanRxBytes.Val,
		"lan_rx_packets":                 gw.LanRxPackets.Val,
		"lan_tx_bytes":                   gw.LanTxBytes.Val,
		"lan_tx_packets":                 gw.LanTxPackets.Val,
		"lan_rx_dropped":                 gw.LanRxDropped.Val,
	}
}
|
||||
|
||||
// batchUSGwans emits per-WAN-port gauges ("unifi.usg.wan_ports") for each
// WAN interface that is currently up. The tags parameter carries device-level
// tags from the caller; a fresh per-port tag map shadows it inside the loop.
func (u *DatadogUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
	for _, wan := range wans {
		if !wan.Up.Val {
			continue // Skip WAN ports that are down.
		}

		tags := cleanTags(map[string]string{
			"device_name": tags["name"],
			"site_name":   tags["site_name"],
			"source":      tags["source"],
			"ip":          wan.IP,
			"purpose":     wan.Name,
			"mac":         wan.Mac,
			"ifname":      wan.Ifname,
			"type":        wan.Type,
			"up":          wan.Up.Txt,
			"enabled":     wan.Enable.Txt,
			"gateway":     wan.Gateway,
		})

		// Datadog gauges are float64, so the duplex flag is encoded 0/1.
		fullDuplex := 0.0
		if wan.FullDuplex.Val {
			fullDuplex = 1.0
		}
		data := map[string]float64{
			"bytes_r":      wan.BytesR.Val,
			"full_duplex":  fullDuplex,
			"max_speed":    wan.MaxSpeed.Val,
			"rx_bytes":     wan.RxBytes.Val,
			"rx_bytes_r":   wan.RxBytesR.Val,
			"rx_dropped":   wan.RxDropped.Val,
			"rx_errors":    wan.RxErrors.Val,
			"rx_broadcast": wan.RxBroadcast.Val,
			"rx_multicast": wan.RxMulticast.Val,
			"rx_packets":   wan.RxPackets.Val,
			"speed":        wan.Speed.Val,
			"tx_bytes":     wan.TxBytes.Val,
			"tx_bytes_r":   wan.TxBytesR.Val,
			"tx_dropped":   wan.TxDropped.Val,
			"tx_errors":    wan.TxErrors.Val,
			"tx_packets":   wan.TxPackets.Val,
			"tx_broadcast": wan.TxBroadcast.Val,
			"tx_multicast": wan.TxMulticast.Val,
		}

		metricName := metricNamespace("usg.wan_ports")
		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
||||
// batchNetTable emits per-network gauges ("unifi.usg.networks") for each
// configured network on a gateway. The tags parameter carries device-level
// tags from the caller; a fresh per-network tag map shadows it inside the loop.
func (u *DatadogUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
	for _, p := range nt {
		tags := cleanTags(map[string]string{
			"device_name": tags["name"],
			"site_name":   tags["site_name"],
			"source":      tags["source"],
			"up":          p.Up.Txt,
			"enabled":     p.Enabled.Txt,
			"ip":          p.IP,
			"mac":         p.Mac,
			"name":        p.Name,
			"domain_name": p.DomainName,
			"purpose":     p.Purpose,
			"is_guest":    p.IsGuest.Txt,
		})
		data := map[string]float64{
			"num_sta":    p.NumSta.Val,
			"rx_bytes":   p.RxBytes.Val,
			"rx_packets": p.RxPackets.Val,
			"tx_bytes":   p.TxBytes.Val,
			"tx_packets": p.TxPackets.Val,
		}

		metricName := metricNamespace("usg.networks")
		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uswT is the counter name used for UniFi Switch datapoints.
const uswT = item("USW")
|
||||
|
||||
// batchUSW generates UniFi Switch datapoints for Datadog under the
// "unifi.usw" namespace, then fans out to the switch's port table.
func (u *DatadogUnifi) batchUSW(r report, s *unifi.USW) {
	if !s.Adopted.Val || s.Locating.Val {
		return // Skip devices that are not adopted or are being located.
	}

	tags := cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data := CombineFloat64(
		u.batchUSWstat(s.Stat.Sw),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]float64{
			"guest_num_sta":       s.GuestNumSta.Val,
			"bytes":               s.Bytes.Val,
			"fan_level":           s.FanLevel.Val,
			"general_temperature": s.GeneralTemperature.Val,
			"last_seen":           s.LastSeen.Val,
			"rx_bytes":            s.RxBytes.Val,
			"tx_bytes":            s.TxBytes.Val,
			"uptime":              s.Uptime.Val,
			"state":               s.State.Val,
			"user_num_sta":        s.UserNumSta.Val,
		})

	r.addCount(uswT)
	metricName := metricNamespace("usw")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchPortTable(r, tags, s.PortTable)
}
|
||||
|
||||
func (u *DatadogUnifi) batchUSWstat(sw *unifi.Sw) map[string]float64 {
|
||||
if sw == nil {
|
||||
return map[string]float64{}
|
||||
}
|
||||
|
||||
return map[string]float64{
|
||||
"stat_bytes": sw.Bytes.Val,
|
||||
"stat_rx_bytes": sw.RxBytes.Val,
|
||||
"stat_rx_crypts": sw.RxCrypts.Val,
|
||||
"stat_rx_dropped": sw.RxDropped.Val,
|
||||
"stat_rx_errors": sw.RxErrors.Val,
|
||||
"stat_rx_frags": sw.RxFrags.Val,
|
||||
"stat_rx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_bytes": sw.TxBytes.Val,
|
||||
"stat_tx_dropped": sw.TxDropped.Val,
|
||||
"stat_tx_errors": sw.TxErrors.Val,
|
||||
"stat_tx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_retries": sw.TxRetries.Val,
|
||||
}
|
||||
}
|
||||
|
||||
// batchPortTable reports gauges for every port on a switch (or a gateway with
// a built-in switch) under the "usw.ports" namespace. Down or disabled ports
// are skipped unless the DeadPorts option is enabled.
//nolint:funlen
func (u *DatadogUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
	for _, p := range pt {
		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
			continue // only record UP ports.
		}

		tags := cleanTags(map[string]string{
			"site_name":      t["site_name"],
			"device_name":    t["name"],
			"source":         t["source"],
			"type":           t["type"],
			"name":           p.Name,
			"poe_mode":       p.PoeMode,
			"port_poe":       p.PortPoe.Txt,
			"port_idx":       p.PortIdx.Txt,
			"port_id":        t["name"] + " Port " + p.PortIdx.Txt,
			"poe_enable":     p.PoeEnable.Txt,
			"flow_ctrl_rx":   p.FlowctrlRx.Txt,
			"flow_ctrl_tx":   p.FlowctrlTx.Txt,
			"media":          p.Media,
			"has_sfp":        p.SFPFound.Txt,
			"sfp_compliance": p.SFPCompliance,
			"sfp_serial":     p.SFPSerial,
			"sfp_vendor":     p.SFPVendor,
			"sfp_part":       p.SFPPart,
		})
		data := map[string]float64{
			"bytes_r":       p.BytesR.Val,
			"rx_broadcast":  p.RxBroadcast.Val,
			"rx_bytes":      p.RxBytes.Val,
			"rx_bytes_r":    p.RxBytesR.Val,
			"rx_dropped":    p.RxDropped.Val,
			"rx_errors":     p.RxErrors.Val,
			"rx_multicast":  p.RxMulticast.Val,
			"rx_packets":    p.RxPackets.Val,
			"speed":         p.Speed.Val,
			"stp_path_cost": p.StpPathcost.Val,
			"tx_broadcast":  p.TxBroadcast.Val,
			"tx_bytes":      p.TxBytes.Val,
			"tx_bytes_r":    p.TxBytesR.Val,
			"tx_dropped":    p.TxDropped.Val,
			"tx_errors":     p.TxErrors.Val,
			"tx_multicast":  p.TxMulticast.Val,
			"tx_packets":    p.TxPackets.Val,
		}

		// PoE electrical readings are only reported for powered PoE ports.
		if p.PoeEnable.Val && p.PortPoe.Val {
			data["poe_current"] = p.PoeCurrent.Val
			data["poe_power"] = p.PoePower.Val
			data["poe_voltage"] = p.PoeVoltage.Val
		}

		// SFP module diagnostics are only reported when a module is present.
		if p.SFPFound.Val {
			data["sfp_current"] = p.SFPCurrent.Val
			data["sfp_voltage"] = p.SFPVoltage.Val
			data["sfp_temperature"] = p.SFPTemperature.Val
			data["sfp_tx_power"] = p.SFPTxpower.Val
			data["sfp_rx_power"] = p.SFPRxpower.Val
		}

		metricName := metricNamespace("usw.ports")
		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uxgT is used as a name for printed/logged counters.
|
||||
const uxgT = item("UXG")
|
||||
|
||||
// batchUXG generates 10Gb Unifi Gateway datapoints for Datadog.
// These points can be passed directly to datadog.
// A UXG contains both a gateway and a switch, so metrics are emitted under
// the "usg" namespace first and the "usw" namespace second.
func (u *DatadogUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
	// Skip devices that are not adopted or are flashing their locate LED.
	if !s.Adopted.Val || s.Locating.Val {
		return
	}

	// Gateway-side tags; includes license_state, which switches don't have.
	tags := cleanTags(map[string]string{
		"source":        s.SourceName,
		"mac":           s.Mac,
		"site_name":     s.SiteName,
		"name":          s.Name,
		"version":       s.Version,
		"model":         s.Model,
		"serial":        s.Serial,
		"type":          s.Type,
		"ip":            s.IP,
		"license_state": s.LicenseState,
	})
	// Merge storage, temperature, gateway, and system stats with the
	// top-level gauges into a single metric map.
	data := CombineFloat64(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]float64{
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"guest_num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user_num_sta":  s.UserNumSta.Val,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(uxgT)

	// Gateway metrics are reported under the shared "usg" namespace.
	metricName := metricNamespace("usg")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Rebuild the tag set without license_state for the switch-side metrics.
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = CombineFloat64(
		u.batchUSWstat(s.Stat.Sw),
		map[string]float64{
			"guest_num_sta": s.GuestNumSta.Val,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	metricName = metricNamespace("usw")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT LICENSE.
|
||||
Copyright (c) 2018-2021 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
# influxunifi
|
||||
|
||||
## UnPoller Output Plugin

Stores UniFi metrics and events collected by UnPoller in InfluxDB.
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
const (
|
||||
alarmT = item("Alarm")
|
||||
anomalyT = item("Anomaly")
|
||||
)
|
||||
|
||||
// batchAlarms generates alarm datapoints for InfluxDB.
func (u *InfluxUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dupl
	// Drop alarms older than one polling interval (plus 1s of slack) so the
	// same alarm is not re-recorded on every poll.
	if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Field values: connection endpoints plus GeoIP enrichment for both sides.
	fields := map[string]interface{}{
		"dest_port":            event.DestPort,
		"src_port":             event.SrcPort,
		"dest_ip":              event.DestIP,
		"dst_mac":              event.DstMAC,
		"host":                 event.Host,
		"msg":                  event.Msg,
		"src_ip":               event.SrcIP,
		"src_mac":              event.SrcMAC,
		"dstip_asn":            event.DestIPGeo.Asn,
		"dstip_latitude":       event.DestIPGeo.Latitude,
		"dstip_longitude":      event.DestIPGeo.Longitude,
		"dstip_city":           event.DestIPGeo.City,
		"dstip_continent_code": event.DestIPGeo.ContinentCode,
		"dstip_country_code":   event.DestIPGeo.CountryCode,
		"dstip_country_name":   event.DestIPGeo.CountryName,
		"dstip_organization":   event.DestIPGeo.Organization,
		"srcip_asn":            event.SourceIPGeo.Asn,
		"srcip_latitude":       event.SourceIPGeo.Latitude,
		"srcip_longitude":      event.SourceIPGeo.Longitude,
		"srcip_city":           event.SourceIPGeo.City,
		"srcip_continent_code": event.SourceIPGeo.ContinentCode,
		"srcip_country_code":   event.SourceIPGeo.CountryCode,
		"srcip_country_name":   event.SourceIPGeo.CountryName,
		"srcip_organization":   event.SourceIPGeo.Organization,
	}

	r.addCount(alarmT)
	r.send(&metric{
		Table:  "unifi_alarm",
		TS:     event.Datetime, // stamp the point with the alarm's own time.
		Fields: cleanFields(fields),
		Tags: cleanTags(map[string]string{
			"site_name":  event.SiteName,
			"source":     event.SourceName,
			"in_iface":   event.InIface,
			"event_type": event.EventType,
			"subsystem":  event.Subsystem,
			"archived":   event.Archived.Txt,
			"usgip":      event.USGIP,
			"proto":      event.Proto,
			"key":        event.Key,
			"catname":    event.Catname,
			"app_proto":  event.AppProto,
			"action":     event.InnerAlertAction,
		}),
	})
}
|
||||
|
||||
// batchAnomaly generates Anomalies from UniFi for InfluxDB.
|
||||
func (u *InfluxUnifi) batchAnomaly(r report, event *unifi.Anomaly) {
|
||||
if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
|
||||
return // The event is older than our interval, ignore it.
|
||||
}
|
||||
|
||||
r.addCount(anomalyT)
|
||||
r.send(&metric{
|
||||
TS: event.Datetime,
|
||||
Table: "unifi_anomaly",
|
||||
Fields: map[string]interface{}{"msg": event.Anomaly},
|
||||
Tags: cleanTags(map[string]string{
|
||||
"application": "unifi_anomaly",
|
||||
"source": event.SourceName,
|
||||
"site_name": event.SiteName,
|
||||
"device_mac": event.DeviceMAC,
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,183 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// batchClient generates Unifi Client datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
	// Tags: identity and attachment data — which AP/switch/gateway the client
	// is on, its radio, and its device fingerprint.
	tags := map[string]string{
		"mac":         s.Mac,
		"site_name":   s.SiteName,
		"source":      s.SourceName,
		"ap_name":     s.ApName,
		"gw_name":     s.GwName,
		"sw_name":     s.SwName,
		"oui":         s.Oui,
		"radio_name":  s.RadioName,
		"radio":       s.Radio,
		"radio_proto": s.RadioProto,
		"name":        s.Name,
		"fixed_ip":    s.FixedIP,
		"sw_port":     s.SwPort.Txt,
		"os_class":    s.OsClass.Txt,
		"os_name":     s.OsName.Txt,
		"dev_cat":     s.DevCat.Txt,
		"dev_id":      s.DevID.Txt,
		"dev_vendor":  s.DevVendor.Txt,
		"dev_family":  s.DevFamily.Txt,
		"is_wired":    s.IsWired.Txt,
		"is_guest":    s.IsGuest.Txt,
		"use_fixedip": s.UseFixedIP.Txt,
		"channel":     s.Channel.Txt,
		"vlan":        s.Vlan.Txt,
	}
	// Fields: per-client counters and signal data. Note "channel" is both a
	// tag (text form) and a field (numeric form).
	fields := map[string]interface{}{
		"anomalies":         s.Anomalies,
		"ip":                s.IP,
		"essid":             s.Essid,
		"bssid":             s.Bssid,
		"channel":           s.Channel.Val,
		"hostname":          s.Name,
		"radio_desc":        s.RadioDescription,
		"satisfaction":      s.Satisfaction.Val,
		"bytes_r":           s.BytesR,
		"ccq":               s.Ccq,
		"noise":             s.Noise,
		"note":              s.Note,
		"powersave_enabled": s.PowersaveEnabled,
		"roam_count":        s.RoamCount,
		"rssi":              s.Rssi,
		"rx_bytes":          s.RxBytes,
		"rx_bytes_r":        s.RxBytesR,
		"rx_packets":        s.RxPackets,
		"rx_rate":           s.RxRate,
		"signal":            s.Signal,
		"tx_bytes":          s.TxBytes,
		"tx_bytes_r":        s.TxBytesR,
		"tx_packets":        s.TxPackets,
		"tx_retries":        s.TxRetries,
		"tx_power":          s.TxPower,
		"tx_rate":           s.TxRate,
		"uptime":            s.Uptime,
		"wifi_tx_attempts":  s.WifiTxAttempts,
		"wired-rx_bytes":    s.WiredRxBytes,
		"wired-rx_bytes-r":  s.WiredRxBytesR,
		"wired-rx_packets":  s.WiredRxPackets,
		"wired-tx_bytes":    s.WiredTxBytes,
		"wired-tx_bytes-r":  s.WiredTxBytesR,
		"wired-tx_packets":  s.WiredTxPackets,
	}

	r.send(&metric{Table: "clients", Tags: tags, Fields: fields})
}
|
||||
|
||||
// totalsDPImap: controller, site, name (app/cat name), dpi.
|
||||
type totalsDPImap map[string]map[string]map[string]unifi.DPIData
|
||||
|
||||
// batchClientDPI generates per-client DPI (deep packet inspection) datapoints
// for InfluxDB and accumulates running per-application and per-category
// totals into appTotal and catTotal. v must be a *unifi.DPITable; any other
// type is logged and ignored.
func (u *InfluxUnifi) batchClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
	s, ok := v.(*unifi.DPITable)
	if !ok {
		u.LogErrorf("invalid type given to batchClientDPI: %T", v)
		return
	}

	for _, dpi := range s.ByApp {
		// Resolve numeric category/app IDs to human-readable names.
		category := unifi.DPICats.Get(dpi.Cat)
		application := unifi.DPIApps.GetApp(dpi.Cat, dpi.App)
		fillDPIMapTotals(appTotal, application, s.SourceName, s.SiteName, dpi)
		fillDPIMapTotals(catTotal, category, s.SourceName, s.SiteName, dpi)

		r.send(&metric{
			Table: "clientdpi",
			Tags: map[string]string{
				"category":    category,
				"application": application,
				"name":        s.Name,
				"mac":         s.MAC,
				"site_name":   s.SiteName,
				"source":      s.SourceName,
			},
			Fields: map[string]interface{}{
				"tx_packets": dpi.TxPackets,
				"rx_packets": dpi.RxPackets,
				"tx_bytes":   dpi.TxBytes,
				"rx_bytes":   dpi.RxBytes,
			},
		})
	}
}
|
||||
|
||||
// fillDPIMapTotals fills in totals for categories and applications. maybe clients too.
|
||||
// This allows less processing in InfluxDB to produce total transfer data per cat or app.
|
||||
func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
|
||||
if m[controller] == nil {
|
||||
m[controller] = make(map[string]map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
if m[controller][site] == nil {
|
||||
m[controller][site] = make(map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
existing := m[controller][site][name]
|
||||
existing.TxPackets += dpi.TxPackets
|
||||
existing.RxPackets += dpi.RxPackets
|
||||
existing.TxBytes += dpi.TxBytes
|
||||
existing.RxBytes += dpi.RxBytes
|
||||
m[controller][site][name] = existing
|
||||
}
|
||||
|
||||
// reportClientDPItotals emits aggregate "TOTAL" DPI datapoints per site so
// dashboards don't have to sum per-client data in InfluxDB.
func reportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
	type all []struct {
		kind string
		val  totalsDPImap
	}

	// This produces 7000+ metrics per site. Disabled for now.
	// Nil-ing appTotal here means the "application" entry below ranges over a
	// nil map and emits nothing, while keeping the structure ready to re-enable.
	if appTotal != nil {
		appTotal = nil
	}

	// This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
	a := all{
		// This produces 7000+ metrics per site. Disabled for now.
		{
			kind: "application",
			val:  appTotal,
		},
		{
			kind: "category",
			val:  catTotal,
		},
	}

	for _, k := range a {
		for controller, s := range k.val {
			for site, c := range s {
				for name, m := range c {
					newMetric := &metric{
						Table: "clientdpi",
						Tags: map[string]string{
							"category":    "TOTAL",
							"application": "TOTAL",
							"name":        "TOTAL",
							"mac":         "TOTAL",
							"site_name":   site,
							"source":      controller,
						},
						Fields: map[string]interface{}{
							"tx_packets": m.TxPackets,
							"rx_packets": m.RxPackets,
							"tx_bytes":   m.TxBytes,
							"rx_bytes":   m.RxBytes,
						},
					}
					// Replace the TOTAL placeholder for the dimension being
					// aggregated ("application" or "category") with its name.
					newMetric.Tags[k.kind] = name

					r.send(newMetric)
				}
			}
		}
	}
}
|
||||
|
|
@ -0,0 +1,185 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// These constants are used as names for printed/logged counters.
|
||||
const (
|
||||
eventT = item("Event")
|
||||
idsT = item("IDS")
|
||||
)
|
||||
|
||||
// batchIDS generates intrusion detection datapoints for InfluxDB.
func (u *InfluxUnifi) batchIDS(r report, i *unifi.IDS) { // nolint:dupl
	// Drop events older than one polling interval (plus 1s of slack) so the
	// same event is not re-recorded on every poll.
	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Field values: connection endpoints plus GeoIP enrichment for both sides.
	fields := map[string]interface{}{
		"dest_port":            i.DestPort,
		"src_port":             i.SrcPort,
		"dest_ip":              i.DestIP,
		"dst_mac":              i.DstMAC,
		"host":                 i.Host,
		"msg":                  i.Msg,
		"src_ip":               i.SrcIP,
		"src_mac":              i.SrcMAC,
		"dstip_asn":            i.DestIPGeo.Asn,
		"dstip_latitude":       i.DestIPGeo.Latitude,
		"dstip_longitude":      i.DestIPGeo.Longitude,
		"dstip_city":           i.DestIPGeo.City,
		"dstip_continent_code": i.DestIPGeo.ContinentCode,
		"dstip_country_code":   i.DestIPGeo.CountryCode,
		"dstip_country_name":   i.DestIPGeo.CountryName,
		"dstip_organization":   i.DestIPGeo.Organization,
		"srcip_asn":            i.SourceIPGeo.Asn,
		"srcip_latitude":       i.SourceIPGeo.Latitude,
		"srcip_longitude":      i.SourceIPGeo.Longitude,
		"srcip_city":           i.SourceIPGeo.City,
		"srcip_continent_code": i.SourceIPGeo.ContinentCode,
		"srcip_country_code":   i.SourceIPGeo.CountryCode,
		"srcip_country_name":   i.SourceIPGeo.CountryName,
		"srcip_organization":   i.SourceIPGeo.Organization,
	}

	r.addCount(idsT)
	r.send(&metric{
		Table:  "unifi_ids",
		TS:     i.Datetime, // stamp the point with the event's own time.
		Fields: cleanFields(fields),
		Tags: cleanTags(map[string]string{
			"site_name":  i.SiteName,
			"source":     i.SourceName,
			"in_iface":   i.InIface,
			"event_type": i.EventType,
			"subsystem":  i.Subsystem,
			"archived":   i.Archived.Txt,
			"usgip":      i.USGIP,
			"proto":      i.Proto,
			"key":        i.Key,
			"catname":    i.Catname,
			"app_proto":  i.AppProto,
			"action":     i.InnerAlertAction,
		}),
	})
}
|
||||
|
||||
// batchEvent generates events from UniFi for InfluxDB.
func (u *InfluxUnifi) batchEvent(r report, i *unifi.Event) { // nolint: funlen
	// Drop events older than one polling interval (plus 1s of slack) so the
	// same event is not re-recorded on every poll.
	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	fields := map[string]interface{}{
		"msg":                  i.Msg,          // contains user[] or guest[] or admin[]
		"duration":             i.Duration.Val, // probably microseconds?
		"guest":                i.Guest,        // mac address
		"user":                 i.User,         // mac address
		"host":                 i.Host,         // usg device?
		"hostname":             i.Hostname,     // client name
		"dest_port":            i.DestPort,
		"src_port":             i.SrcPort,
		"bytes":                i.Bytes.Val,
		"dest_ip":              i.DestIP,
		"dst_mac":              i.DstMAC,
		"ip":                   i.IP,
		"src_ip":               i.SrcIP,
		"src_mac":              i.SrcMAC,
		"dstip_asn":            i.DestIPGeo.Asn,
		"dstip_latitude":       i.DestIPGeo.Latitude,
		"dstip_longitude":      i.DestIPGeo.Longitude,
		"dstip_city":           i.DestIPGeo.City,
		"dstip_continent_code": i.DestIPGeo.ContinentCode,
		"dstip_country_code":   i.DestIPGeo.CountryCode,
		"dstip_country_name":   i.DestIPGeo.CountryName,
		"dstip_organization":   i.DestIPGeo.Organization,
		"srcip_asn":            i.SourceIPGeo.Asn,
		"srcip_latitude":       i.SourceIPGeo.Latitude,
		"srcip_longitude":      i.SourceIPGeo.Longitude,
		"srcip_city":           i.SourceIPGeo.City,
		"srcip_continent_code": i.SourceIPGeo.ContinentCode,
		"srcip_country_code":   i.SourceIPGeo.CountryCode,
		"srcip_country_name":   i.SourceIPGeo.CountryName,
		"srcip_organization":   i.SourceIPGeo.Organization,
	}

	r.addCount(eventT)
	r.send(&metric{
		TS:     i.Datetime, // stamp the point with the event's own time.
		Table:  "unifi_events",
		Fields: cleanFields(fields),
		Tags: cleanTags(map[string]string{
			"admin":        i.Admin, // username
			"site_name":    i.SiteName,
			"source":       i.SourceName,
			"ap_from":      i.ApFrom,
			"ap_to":        i.ApTo,
			"ap":           i.Ap,
			"ap_name":      i.ApName,
			"gw":           i.Gw,
			"gw_name":      i.GwName,
			"sw":           i.Sw,
			"sw_name":      i.SwName,
			"catname":      i.Catname,
			"radio":        i.Radio,
			"radio_from":   i.RadioFrom,
			"radio_to":     i.RadioTo,
			"key":          i.Key,
			"in_iface":     i.InIface,
			"event_type":   i.EventType,
			"subsystem":    i.Subsystem,
			"ssid":         i.SSID,
			"is_admin":     i.IsAdmin.Txt,
			"channel":      i.Channel.Txt,
			"channel_from": i.ChannelFrom.Txt,
			"channel_to":   i.ChannelTo.Txt,
			"usgip":        i.USGIP,
			"network":      i.Network,
			"app_proto":    i.AppProto,
			"proto":        i.Proto,
			"action":       i.InnerAlertAction,
		}),
	})
}
|
||||
|
||||
// cleanTags strips every tag whose value is empty, mutating the given map
// in place and returning it for convenience.
func cleanTags(tags map[string]string) map[string]string {
	for key, value := range tags {
		if value == "" {
			delete(tags, key)
		}
	}

	return tags
}
|
||||
|
||||
// cleanFields removes any field with a default (or empty) value.
|
||||
func cleanFields(fields map[string]interface{}) map[string]interface{} { //nolint:cyclop
|
||||
for s := range fields {
|
||||
switch v := fields[s].(type) {
|
||||
case nil:
|
||||
delete(fields, s)
|
||||
case int, int64, float64:
|
||||
if v == 0 {
|
||||
delete(fields, s)
|
||||
}
|
||||
case unifi.FlexBool:
|
||||
if v.Txt == "" {
|
||||
delete(fields, s)
|
||||
}
|
||||
case unifi.FlexInt:
|
||||
if v.Txt == "" {
|
||||
delete(fields, s)
|
||||
}
|
||||
case string:
|
||||
if v == "" {
|
||||
delete(fields, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
|
@ -0,0 +1,298 @@
|
|||
// Package influxunifi provides the methods to turn UniFi measurements into influx
|
||||
// data-points with appropriate tags and fields.
|
||||
package influxunifi
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
influx "github.com/influxdata/influxdb1-client/v2"
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
"golift.io/cnfg"
|
||||
)
|
||||
|
||||
// PluginName is the name of this plugin.
|
||||
const PluginName = "influxdb"
|
||||
|
||||
const (
|
||||
defaultInterval = 30 * time.Second
|
||||
minimumInterval = 10 * time.Second
|
||||
defaultInfluxDB = "unifi"
|
||||
defaultInfluxUser = "unifipoller"
|
||||
defaultInfluxURL = "http://127.0.0.1:8086"
|
||||
)
|
||||
|
||||
// Config defines the data needed to store metrics in InfluxDB.
type Config struct {
	// Interval is how often metrics are collected and written; clamped by
	// setConfigDefaults to at least the minimum interval.
	Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
	// URL is the InfluxDB server address (default http://127.0.0.1:8086).
	URL  string `json:"url,omitempty" toml:"url,omitempty" xml:"url" yaml:"url"`
	User string `json:"user,omitempty" toml:"user,omitempty" xml:"user" yaml:"user"`
	// Pass is the password, or "file://<path>" to read the secret from a file.
	Pass string `json:"pass,omitempty" toml:"pass,omitempty" xml:"pass" yaml:"pass"`
	DB   string `json:"db,omitempty" toml:"db,omitempty" xml:"db" yaml:"db"`
	// Disable turns this output plugin off entirely.
	Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
	// VerifySSL enables TLS certificate verification when true.
	VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
	// Save data for dead ports? ie. ports that are down or disabled.
	DeadPorts bool `json:"dead_ports" toml:"dead_ports" xml:"dead_ports" yaml:"dead_ports"`
}
|
||||
|
||||
// InfluxDB allows the data to be nested in the config file.
type InfluxDB struct {
	// Config holds the "influxdb" section of the poller configuration.
	*Config `json:"influxdb" toml:"influxdb" xml:"influxdb" yaml:"influxdb"`
}
|
||||
|
||||
// InfluxUnifi is returned by New() after you provide a Config.
type InfluxUnifi struct {
	Collector poller.Collect // poller core: supplies metrics, events and logging.
	influx    influx.Client  // InfluxDB HTTP client, created in Run().
	LastCheck time.Time      // updated on every ticker tick in PollController.
	*InfluxDB
}
|
||||
|
||||
// metric is one InfluxDB data point: a measurement name (Table), its tags and
// fields, and an optional timestamp. A zero TS is replaced with the poll's
// timestamp by collect().
type metric struct {
	Table  string
	Tags   map[string]string
	Fields map[string]interface{}
	TS     time.Time
}
|
||||
|
||||
// init registers this output plugin with the poller core so Run is invoked
// at startup with the parsed configuration.
func init() { // nolint: gochecknoinits
	u := &InfluxUnifi{InfluxDB: &InfluxDB{}, LastCheck: time.Now()}

	poller.NewOutput(&poller.Output{
		Name:   PluginName,
		Config: u.InfluxDB,
		Method: u.Run,
	})
}
|
||||
|
||||
// PollController runs forever, polling UniFi and pushing to InfluxDB
// This is started by Run() or RunBoth() after everything checks out.
// The ticker is never stopped, so this loop does not return.
func (u *InfluxUnifi) PollController() {
	interval := u.Interval.Round(time.Second)
	ticker := time.NewTicker(interval)
	log.Printf("[INFO] Poller->InfluxDB started, interval: %v, dp: %v, db: %s, url: %s",
		interval, u.DeadPorts, u.DB, u.URL)

	// Each tick: fetch metrics and events, then batch and write them.
	// Any failure skips this cycle; the next tick tries again.
	for u.LastCheck = range ticker.C {
		metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
		if err != nil {
			u.LogErrorf("metric fetch for InfluxDB failed: %v", err)
			continue
		}

		events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
		if err != nil {
			u.LogErrorf("event fetch for InfluxDB failed: %v", err)
			continue
		}

		report, err := u.ReportMetrics(metrics, events)
		if err != nil {
			// XXX: reset and re-auth? not sure..
			u.LogErrorf("%v", err)
			continue
		}

		u.Logf("UniFi Metrics Recorded. %v", report)
	}
}
|
||||
|
||||
// Run runs a ticker to poll the unifi server and update influxdb.
// It returns nil immediately when the plugin is unconfigured or disabled;
// otherwise it blocks in PollController.
func (u *InfluxUnifi) Run(c poller.Collect) error {
	var err error

	// A missing or disabled config turns this output off entirely.
	if u.Collector = c; u.Config == nil || u.Disable {
		u.Logf("InfluxDB config missing (or disabled), InfluxDB output disabled!")
		return nil
	}

	u.setConfigDefaults()

	u.influx, err = influx.NewHTTPClient(influx.HTTPConfig{
		Addr:      u.URL,
		Username:  u.User,
		Password:  u.Pass,
		TLSConfig: &tls.Config{InsecureSkipVerify: !u.VerifySSL}, // nolint: gosec
	})
	if err != nil {
		return fmt.Errorf("making client: %w", err)
	}

	// Publish a sanitized copy of the config to the web UI: the password is
	// replaced with "true"/"false" indicating only whether one was set.
	fake := *u.Config
	fake.Pass = strconv.FormatBool(fake.Pass != "")

	webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: fake})
	u.PollController()

	return nil
}
|
||||
|
||||
func (u *InfluxUnifi) setConfigDefaults() {
|
||||
if u.URL == "" {
|
||||
u.URL = defaultInfluxURL
|
||||
}
|
||||
|
||||
if u.User == "" {
|
||||
u.User = defaultInfluxUser
|
||||
}
|
||||
|
||||
if strings.HasPrefix(u.Pass, "file://") {
|
||||
u.Pass = u.getPassFromFile(strings.TrimPrefix(u.Pass, "file://"))
|
||||
}
|
||||
|
||||
if u.Pass == "" {
|
||||
u.Pass = defaultInfluxUser
|
||||
}
|
||||
|
||||
if u.DB == "" {
|
||||
u.DB = defaultInfluxDB
|
||||
}
|
||||
|
||||
if u.Interval.Duration == 0 {
|
||||
u.Interval = cnfg.Duration{Duration: defaultInterval}
|
||||
} else if u.Interval.Duration < minimumInterval {
|
||||
u.Interval = cnfg.Duration{Duration: minimumInterval}
|
||||
}
|
||||
|
||||
u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
|
||||
}
|
||||
|
||||
func (u *InfluxUnifi) getPassFromFile(filename string) string {
|
||||
b, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
u.LogErrorf("Reading InfluxDB Password File: %v", err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(b))
|
||||
}
|
||||
|
||||
// ReportMetrics batches all device and client data into influxdb data points.
// Call this after you've collected all the data you care about.
// Returns an error if influxdb calls fail, otherwise returns a report.
func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
	r := &Report{
		Metrics: m,
		Events:  e,
		ch:      make(chan *metric),
		Start:   time.Now(),
		Counts:  &Counts{Val: make(map[item]int)},
	}
	defer close(r.ch) // terminates the collect() goroutine started below.

	var err error

	// Make a new Influx Points Batcher.
	r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.DB})

	if err != nil {
		return nil, fmt.Errorf("influx.NewBatchPoint: %w", err)
	}

	go u.collect(r, r.ch)
	// Batch all the points.
	u.loopPoints(r)
	r.wg.Wait() // wait for all points to finish batching!

	// Send all the points.
	if err = u.influx.Write(r.bp); err != nil {
		return nil, fmt.Errorf("influxdb.Write(points): %w", err)
	}

	r.Elapsed = time.Since(r.Start)

	return r, nil
}
|
||||
|
||||
// collect runs in a go routine and batches all the points.
// It drains the report's metric channel until it is closed by ReportMetrics.
func (u *InfluxUnifi) collect(r report, ch chan *metric) {
	for m := range ch {
		// Metrics without an explicit timestamp inherit the poll's timestamp.
		if m.TS.IsZero() {
			m.TS = r.metrics().TS
		}

		pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, m.TS)
		if err == nil {
			r.batch(m, pt)
		}

		// Record the (possibly nil) error, then mark this metric processed.
		r.error(err)
		r.done()
	}
}
|
||||
|
||||
// loopPoints walks every collected metric and event and feeds each one,
// via switchExport, to the collect() goroutine through the metric channel.
func (u *InfluxUnifi) loopPoints(r report) {
	m := r.metrics()

	for _, s := range m.RogueAPs {
		u.switchExport(r, s)
	}

	for _, s := range m.Sites {
		u.switchExport(r, s)
	}

	for _, s := range m.SitesDPI {
		u.batchSiteDPI(r, s)
	}

	for _, s := range m.Clients {
		u.switchExport(r, s)
	}

	for _, s := range m.Devices {
		u.switchExport(r, s)
	}

	for _, s := range r.events().Logs {
		u.switchExport(r, s)
	}

	// Client DPI data is aggregated per-app and per-category as it is batched.
	appTotal := make(totalsDPImap)
	catTotal := make(totalsDPImap)

	for _, s := range m.ClientsDPI {
		u.batchClientDPI(r, s, appTotal, catTotal)
	}

	reportClientDPItotals(r, appTotal, catTotal)
}
|
||||
|
||||
// switchExport routes one collected item to the batcher for its concrete
// type. Unknown types are logged and skipped.
func (u *InfluxUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
	switch v := v.(type) {
	case *unifi.RogueAP:
		u.batchRogueAP(r, v)
	case *unifi.UAP:
		u.batchUAP(r, v)
	case *unifi.USW:
		u.batchUSW(r, v)
	case *unifi.USG:
		u.batchUSG(r, v)
	case *unifi.UXG:
		u.batchUXG(r, v)
	case *unifi.UDM:
		u.batchUDM(r, v)
	case *unifi.Site:
		u.batchSite(r, v)
	case *unifi.Client:
		u.batchClient(r, v)
	case *unifi.Event:
		u.batchEvent(r, v)
	case *unifi.IDS:
		u.batchIDS(r, v)
	case *unifi.Alarm:
		u.batchAlarms(r, v)
	case *unifi.Anomaly:
		u.batchAnomaly(r, v)
	default:
		u.LogErrorf("invalid export type: %T", v)
	}
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
// Logf logs a message.
|
||||
func (u *InfluxUnifi) Logf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "info"},
|
||||
})
|
||||
u.Collector.Logf(msg, v...)
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (u *InfluxUnifi) LogErrorf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "error"},
|
||||
})
|
||||
u.Collector.LogErrorf(msg, v...)
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (u *InfluxUnifi) LogDebugf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "debug"},
|
||||
})
|
||||
u.Collector.LogDebugf(msg, v...)
|
||||
}
|
||||
|
|
@ -0,0 +1,113 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
influx "github.com/influxdata/influxdb1-client/v2"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
// Report is returned to the calling procedure after everything is processed.
type Report struct {
	Metrics *poller.Metrics    // input metrics snapshot being exported.
	Events  *poller.Events     // input events snapshot being exported.
	Errors  []error            // errors accumulated while building points.
	Counts  *Counts            // per-item counters for the summary line.
	Start   time.Time          // when this export run began.
	Elapsed time.Duration      // total wall time of the export run.
	ch      chan *metric       // feeds metrics to the collector routine.
	wg      sync.WaitGroup     // tracks metrics in flight on ch.
	bp      influx.BatchPoints // accumulated influx points for one write.
}

// Counts holds counters and has a lock to deal with routines.
type Counts struct {
	// Val maps a counter name to its running total.
	Val map[item]int
	// Embedded so callers lock the struct directly (Counts.Lock()).
	sync.RWMutex
}

// report is an internal interface that can be mocked and overridden for tests.
type report interface {
	// add increments the in-flight work group by one.
	add()
	// done marks one unit of in-flight work complete.
	done()
	// send queues a metric for the collector routine.
	send(m *metric)
	// error records a non-nil error; nil is ignored.
	error(err error)
	// batch adds a finished influx point and bumps counters.
	batch(m *metric, pt *influx.Point)
	// metrics returns the metrics snapshot being exported.
	metrics() *poller.Metrics
	// events returns the events snapshot being exported.
	events() *poller.Events
	// addCount bumps a named counter, by 1 or by the given amounts.
	addCount(item, ...int)
}
|
||||
|
||||
// metrics returns the metrics snapshot attached to this report.
func (r *Report) metrics() *poller.Metrics {
	return r.Metrics
}

// events returns the events snapshot attached to this report.
func (r *Report) events() *poller.Events {
	return r.Events
}

// add increments the wait group; pair with a later done().
func (r *Report) add() {
	r.wg.Add(1)
}

// done marks one queued metric as fully processed by the collector.
func (r *Report) done() {
	r.wg.Done()
}

// send queues a metric for the collector routine. The wait-group add
// happens before the channel send so the collector's done() cannot
// race ahead of it.
func (r *Report) send(m *metric) {
	r.wg.Add(1)
	r.ch <- m
}
|
||||
|
||||
/* Counter helpers. addCount itself is guarded by the Counts mutex. */

// item names a counter in the Counts map.
type item string

// addCount bumps the named counter: by one when no amounts are given,
// otherwise by the sum of the given amounts.
// Counts.Val must be initialized before the first call — TODO confirm
// where the map is created (not visible in this file section).
func (r *Report) addCount(name item, counts ...int) {
	r.Counts.Lock()
	defer r.Counts.Unlock()

	if len(counts) == 0 {
		r.Counts.Val[name]++
	}

	// No-op when counts is empty, so the branches are mutually exclusive.
	for _, c := range counts {
		r.Counts.Val[name] += c
	}
}
|
||||
|
||||
func (r *Report) error(err error) {
|
||||
if err != nil {
|
||||
r.Errors = append(r.Errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
// These constants are used as names for printed/logged counters.
const (
	pointT = item("Point")  // number of influx points batched.
	fieldT = item("Fields") // total field count across all points.
)

// batch adds a finished influx point to the pending batch and updates
// the point and field counters.
func (r *Report) batch(m *metric, p *influx.Point) {
	r.addCount(pointT)
	r.addCount(fieldT, len(m.Fields))
	// NOTE(review): bp.AddPoint is not mutex-guarded here; this assumes a
	// single collector goroutine calls batch — confirm against the caller.
	r.bp.AddPoint(p)
}
|
||||
|
||||
// String returns a one-line summary of everything this report exported,
// used for the per-interval log message. The counter names and their
// values are interleaved in the format arguments, so argument order here
// must track the format string exactly.
func (r *Report) String() string {
	r.Counts.RLock()
	defer r.Counts.RUnlock()

	m, c := r.Metrics, r.Counts.Val

	return fmt.Sprintf("Site: %d, Client: %d, "+
		"Gateways: %d, %s: %d, %s: %d, %s/%s/%s/%s: %d/%d/%d/%d, "+
		"DPI Site/Client: %d/%d, %s: %d, %s: %d, Err: %d, Dur: %v",
		len(m.Sites), len(m.Clients),
		c[udmT]+c[usgT]+c[uxgT], uapT, c[uapT], uswT, c[uswT],
		idsT, eventT, alarmT, anomalyT, c[idsT], c[eventT], c[alarmT], c[anomalyT],
		len(m.SitesDPI), len(m.ClientsDPI), pointT, c[pointT], fieldT, c[fieldT],
		len(r.Errors), r.Elapsed.Round(time.Millisecond))
}
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// batchSite generates Unifi Sites' datapoints for InfluxDB.
// These points can be passed directly to influx.
// One "subsystems" point is sent per health entry on the site.
func (u *InfluxUnifi) batchSite(r report, s *unifi.Site) {
	for _, h := range s.Health {
		tags := map[string]string{
			"name":      s.Name,
			"site_name": s.SiteName,
			"source":    s.SourceName,
			"desc":      s.Desc,
			"status":    h.Status,
			"subsystem": h.Subsystem,
			"wan_ip":    h.WanIP,
			"gw_name":   h.GwName,
			"lan_ip":    h.LanIP,
		}
		// NOTE: "wan_ip" is recorded both as a tag (above) and as a field
		// (below); both copies come from the same health entry.
		fields := map[string]interface{}{
			"num_user":                 h.NumUser.Val,
			"num_guest":                h.NumGuest.Val,
			"num_iot":                  h.NumIot.Val,
			"tx_bytes-r":               h.TxBytesR.Val,
			"rx_bytes-r":               h.RxBytesR.Val,
			"num_ap":                   h.NumAp.Val,
			"num_adopted":              h.NumAdopted.Val,
			"num_disabled":             h.NumDisabled.Val,
			"num_disconnected":         h.NumDisconnected.Val,
			"num_pending":              h.NumPending.Val,
			"num_gw":                   h.NumGw.Val,
			"wan_ip":                   h.WanIP,
			"num_sta":                  h.NumSta.Val,
			"gw_cpu":                   h.GwSystemStats.CPU.Val,
			"gw_mem":                   h.GwSystemStats.Mem.Val,
			"gw_uptime":                h.GwSystemStats.Uptime.Val,
			"latency":                  h.Latency.Val,
			"uptime":                   h.Uptime.Val,
			"drops":                    h.Drops.Val,
			"xput_up":                  h.XputUp.Val,
			"xput_down":                h.XputDown.Val,
			"speedtest_ping":           h.SpeedtestPing.Val,
			"speedtest_lastrun":        h.SpeedtestLastrun.Val,
			"num_sw":                   h.NumSw.Val,
			"remote_user_num_active":   h.RemoteUserNumActive.Val,
			"remote_user_num_inactive": h.RemoteUserNumInactive.Val,
			"remote_user_rx_bytes":     h.RemoteUserRxBytes.Val,
			"remote_user_tx_bytes":     h.RemoteUserTxBytes.Val,
			"remote_user_rx_packets":   h.RemoteUserRxPackets.Val,
			"remote_user_tx_packets":   h.RemoteUserTxPackets.Val,
			"num_new_alarms":           s.NumNewAlarms.Val,
		}

		r.send(&metric{Table: "subsystems", Tags: tags, Fields: fields})
	}
}
|
||||
|
||||
func (u *InfluxUnifi) batchSiteDPI(r report, v interface{}) {
|
||||
s, ok := v.(*unifi.DPITable)
|
||||
if !ok {
|
||||
u.LogErrorf("invalid type given to batchSiteDPI: %T", v)
|
||||
return
|
||||
}
|
||||
|
||||
for _, dpi := range s.ByApp {
|
||||
r.send(&metric{
|
||||
Table: "sitedpi",
|
||||
Tags: map[string]string{
|
||||
"category": unifi.DPICats.Get(dpi.Cat),
|
||||
"application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"tx_packets": dpi.TxPackets,
|
||||
"rx_packets": dpi.RxPackets,
|
||||
"tx_bytes": dpi.TxBytes,
|
||||
"rx_bytes": dpi.RxBytes,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,227 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uapT is used as a name for printed/logged counters.
const uapT = item("UAP")

// batchRogueAP generates metric points for neighboring access points.
// Entries with a zero age are skipped so only recently-seen neighbors
// are recorded.
func (u *InfluxUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
	if s.Age.Val == 0 {
		return // only keep metrics for things that are recent.
	}

	r.send(&metric{
		Table: "uap_rogue",
		Tags: map[string]string{
			"security":   s.Security,
			"oui":        s.Oui,
			"band":       s.Band,
			"mac":        s.Bssid,
			"ap_mac":     s.ApMac,
			"radio":      s.Radio,
			"radio_name": s.RadioName,
			"site_name":  s.SiteName,
			"name":       s.Essid,
			"source":     s.SourceName,
		},
		Fields: map[string]interface{}{
			"age":         s.Age.Val,
			"bw":          s.Bw.Val,
			"center_freq": s.CenterFreq.Val,
			"channel":     s.Channel,
			"freq":        s.Freq.Val,
			"noise":       s.Noise.Val,
			"rssi":        s.Rssi.Val,
			"rssi_age":    s.RssiAge.Val,
			"signal":      s.Signal.Val,
		},
	})
}
|
||||
|
||||
// batchUAP generates Wireless-Access-Point datapoints for InfluxDB.
// These points can be passed directly to influx.
// Besides the main "uap" point, it also emits radio, VAP, and port
// points via the process/batch helpers below.
func (u *InfluxUnifi) batchUAP(r report, s *unifi.UAP) {
	if !s.Adopted.Val || s.Locating.Val {
		return // skip unadopted devices and ones in locate (flashing-LED) mode.
	}

	tags := map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	// Start from accumulated AP stats plus system stats, then overlay
	// the device-level fields.
	fields := Combine(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
	fields["ip"] = s.IP
	fields["bytes"] = s.Bytes.Val
	fields["last_seen"] = s.LastSeen.Val
	fields["rx_bytes"] = s.RxBytes.Val
	fields["tx_bytes"] = s.TxBytes.Val
	fields["uptime"] = s.Uptime.Val
	fields["user-num_sta"] = int(s.UserNumSta.Val)
	fields["guest-num_sta"] = int(s.GuestNumSta.Val)
	fields["num_sta"] = s.NumSta.Val

	r.addCount(uapT)
	r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
	u.processRadTable(r, tags, s.RadioTable, s.RadioTableStats)
	u.processVAPTable(r, tags, s.VapTable)
	u.batchPortTable(r, tags, s.PortTable)
}
|
||||
|
||||
// processUAPstats flattens an access point's accumulated traffic counters
// into a field map. Returns an empty map when the controller provided no
// AP stats (nil), so callers can Combine the result unconditionally.
func (u *InfluxUnifi) processUAPstats(ap *unifi.Ap) map[string]interface{} {
	if ap == nil {
		return map[string]interface{}{}
	}

	// Accumulative Statistics.
	return map[string]interface{}{
		"stat_user-rx_packets":  ap.UserRxPackets.Val,
		"stat_guest-rx_packets": ap.GuestRxPackets.Val,
		"stat_rx_packets":       ap.RxPackets.Val,
		"stat_user-rx_bytes":    ap.UserRxBytes.Val,
		"stat_guest-rx_bytes":   ap.GuestRxBytes.Val,
		"stat_rx_bytes":         ap.RxBytes.Val,
		"stat_user-rx_errors":   ap.UserRxErrors.Val,
		"stat_guest-rx_errors":  ap.GuestRxErrors.Val,
		"stat_rx_errors":        ap.RxErrors.Val,
		"stat_user-rx_dropped":  ap.UserRxDropped.Val,
		"stat_guest-rx_dropped": ap.GuestRxDropped.Val,
		"stat_rx_dropped":       ap.RxDropped.Val,
		"stat_user-rx_crypts":   ap.UserRxCrypts.Val,
		"stat_guest-rx_crypts":  ap.GuestRxCrypts.Val,
		"stat_rx_crypts":        ap.RxCrypts.Val,
		"stat_user-rx_frags":    ap.UserRxFrags.Val,
		"stat_guest-rx_frags":   ap.GuestRxFrags.Val,
		"stat_rx_frags":         ap.RxFrags.Val,
		"stat_user-tx_packets":  ap.UserTxPackets.Val,
		"stat_guest-tx_packets": ap.GuestTxPackets.Val,
		"stat_tx_packets":       ap.TxPackets.Val,
		"stat_user-tx_bytes":    ap.UserTxBytes.Val,
		"stat_guest-tx_bytes":   ap.GuestTxBytes.Val,
		"stat_tx_bytes":         ap.TxBytes.Val,
		"stat_user-tx_errors":   ap.UserTxErrors.Val,
		"stat_guest-tx_errors":  ap.GuestTxErrors.Val,
		"stat_tx_errors":        ap.TxErrors.Val,
		"stat_user-tx_dropped":  ap.UserTxDropped.Val,
		"stat_guest-tx_dropped": ap.GuestTxDropped.Val,
		"stat_tx_dropped":       ap.TxDropped.Val,
		"stat_user-tx_retries":  ap.UserTxRetries.Val,
		"stat_guest-tx_retries": ap.GuestTxRetries.Val,
		// NOTE(review): there is no combined "stat_tx_retries" entry here,
		// unlike the other tx counters — confirm whether that is intended.
	}
}
|
||||
|
||||
// processVAPTable creates points for Wifi Radios. This works with several types of UAP-capable devices.
// One "uap_vaps" point is emitted per virtual access point (SSID/radio pair).
func (u *InfluxUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) { // nolint: funlen
	for _, s := range vt {
		tags := map[string]string{
			"device_name": t["name"],
			"site_name":   t["site_name"],
			"source":      t["source"],
			"ap_mac":      s.ApMac,
			"bssid":       s.Bssid,
			"id":          s.ID,
			"name":        s.Name,
			"radio_name":  s.RadioName,
			"radio":       s.Radio,
			"essid":       s.Essid,
			"site_id":     s.SiteID,
			"usage":       s.Usage,
			"state":       s.State,
			"is_guest":    s.IsGuest.Txt,
		}
		fields := map[string]interface{}{
			"ccq":                   s.Ccq,
			"mac_filter_rejections": s.MacFilterRejections,
			"num_satisfaction_sta":  s.NumSatisfactionSta.Val,
			"avg_client_signal":     s.AvgClientSignal.Val,
			"satisfaction":          s.Satisfaction.Val,
			"satisfaction_now":      s.SatisfactionNow.Val,
			"num_sta":               s.NumSta,
			"channel":               s.Channel.Val,
			"rx_bytes":              s.RxBytes.Val,
			"rx_crypts":             s.RxCrypts.Val,
			"rx_dropped":            s.RxDropped.Val,
			"rx_errors":             s.RxErrors.Val,
			"rx_frags":              s.RxFrags.Val,
			"rx_nwids":              s.RxNwids.Val,
			"rx_packets":            s.RxPackets.Val,
			"tx_bytes":              s.TxBytes.Val,
			"tx_dropped":            s.TxDropped.Val,
			"tx_errors":             s.TxErrors.Val,
			"tx_packets":            s.TxPackets.Val,
			"tx_power":              s.TxPower.Val,
			"tx_retries":            s.TxRetries.Val,
			"tx_combined_retries":   s.TxCombinedRetries.Val,
			"tx_data_mpdu_bytes":    s.TxDataMpduBytes.Val,
			"tx_rts_retries":        s.TxRtsRetries.Val,
			"tx_success":            s.TxSuccess.Val,
			"tx_total":              s.TxTotal.Val,
			"tx_tcp_goodbytes":      s.TxTCPStats.Goodbytes.Val,
			"tx_tcp_lat_avg":        s.TxTCPStats.LatAvg.Val,
			"tx_tcp_lat_max":        s.TxTCPStats.LatMax.Val,
			"tx_tcp_lat_min":        s.TxTCPStats.LatMin.Val,
			"rx_tcp_goodbytes":      s.RxTCPStats.Goodbytes.Val,
			"rx_tcp_lat_avg":        s.RxTCPStats.LatAvg.Val,
			"rx_tcp_lat_max":        s.RxTCPStats.LatMax.Val,
			"rx_tcp_lat_min":        s.RxTCPStats.LatMin.Val,
			"wifi_tx_latency_mov_avg":   s.WifiTxLatencyMov.Avg.Val,
			"wifi_tx_latency_mov_max":   s.WifiTxLatencyMov.Max.Val,
			"wifi_tx_latency_mov_min":   s.WifiTxLatencyMov.Min.Val,
			"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
			// NOTE(review): "cuont" looks like a typo for "count"; renaming
			// the field would break existing dashboards — confirm before fixing.
			"wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
		}

		r.send(&metric{Table: "uap_vaps", Tags: tags, Fields: fields})
	}
}
|
||||
|
||||
func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
|
||||
for _, p := range rt {
|
||||
tags := map[string]string{
|
||||
"device_name": t["name"],
|
||||
"site_name": t["site_name"],
|
||||
"source": t["source"],
|
||||
"channel": p.Channel.Txt,
|
||||
"radio": p.Radio,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"current_antenna_gain": p.CurrentAntennaGain.Val,
|
||||
"ht": p.Ht.Txt,
|
||||
"max_txpower": p.MaxTxpower.Val,
|
||||
"min_txpower": p.MinTxpower.Val,
|
||||
"nss": p.Nss.Val,
|
||||
"radio_caps": p.RadioCaps.Val,
|
||||
}
|
||||
|
||||
for _, t := range rts {
|
||||
if t.Name == p.Name {
|
||||
fields["ast_be_xmit"] = t.AstBeXmit.Val
|
||||
fields["channel"] = t.Channel.Val
|
||||
fields["cu_self_rx"] = t.CuSelfRx.Val
|
||||
fields["cu_self_tx"] = t.CuSelfTx.Val
|
||||
fields["cu_total"] = t.CuTotal.Val
|
||||
fields["extchannel"] = t.Extchannel.Val
|
||||
fields["gain"] = t.Gain.Val
|
||||
fields["guest-num_sta"] = t.GuestNumSta.Val
|
||||
fields["num_sta"] = t.NumSta.Val
|
||||
fields["radio"] = t.Radio
|
||||
fields["tx_packets"] = t.TxPackets.Val
|
||||
fields["tx_power"] = t.TxPower.Val
|
||||
fields["tx_retries"] = t.TxRetries.Val
|
||||
fields["user-num_sta"] = t.UserNumSta.Val
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
r.send(&metric{Table: "uap_radios", Tags: tags, Fields: fields})
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,179 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// udmT is used as a name for printed/logged counters.
const udmT = item("UDM")
|
||||
|
||||
// Combine merges any number of maps into one new map. When the same key
// appears in more than one input, the value from the later map wins —
// order your inputs accordingly.
func Combine(in ...map[string]interface{}) map[string]interface{} {
	merged := make(map[string]interface{})

	for _, m := range in {
		for key, value := range m {
			merged[key] = value
		}
	}

	return merged
}
|
||||
|
||||
// batchSysStats is used by all device types.
|
||||
func (u *InfluxUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]interface{} {
|
||||
m := map[string]interface{}{
|
||||
"loadavg_1": s.Loadavg1.Val,
|
||||
"loadavg_5": s.Loadavg5.Val,
|
||||
"loadavg_15": s.Loadavg15.Val,
|
||||
"mem_used": s.MemUsed.Val,
|
||||
"mem_buffer": s.MemBuffer.Val,
|
||||
"mem_total": s.MemTotal.Val,
|
||||
"cpu": ss.CPU.Val,
|
||||
"mem": ss.Mem.Val,
|
||||
"system_uptime": ss.Uptime.Val,
|
||||
}
|
||||
|
||||
for k, v := range ss.Temps {
|
||||
temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
|
||||
k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")
|
||||
|
||||
if temp != 0 && k != "" {
|
||||
m["temp_"+strings.ToLower(k)] = temp
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (u *InfluxUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]interface{} {
|
||||
output := make(map[string]interface{})
|
||||
|
||||
for _, t := range temps {
|
||||
output["temp_"+t.Name] = t.Value
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func (u *InfluxUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]interface{} {
|
||||
output := make(map[string]interface{})
|
||||
|
||||
for _, t := range storage {
|
||||
output["storage_"+t.Name+"_size"] = t.Size.Val
|
||||
output["storage_"+t.Name+"_used"] = t.Used.Val
|
||||
|
||||
if t.Size.Val != 0 && t.Used.Val != 0 && t.Used.Val < t.Size.Val {
|
||||
output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
|
||||
} else {
|
||||
output["storage_"+t.Name+"_pct"] = 0
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// batchUDM generates Unifi Gateway datapoints for InfluxDB.
// These points can be passed directly to influx.
// A UDM embeds a gateway, a switch, and (on non-Pro models) an access
// point, so a single device record can emit "usg", "usw", and "uap" points.
func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
	if !s.Adopted.Val || s.Locating.Val {
		return // skip unadopted devices and ones in locate (flashing-LED) mode.
	}

	// Gateway portion — sent to the shared "usg" table.
	tags := map[string]string{
		"source":    s.SourceName,
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields := Combine(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]interface{}{
			"source":        s.SourceName,
			"ip":            s.IP,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"license_state": s.LicenseState,
			"guest-num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user-num_sta":  s.UserNumSta.Val,
			"version":       s.Version,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(udmT)
	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Switch portion — sent to the shared "usw" table.
	tags = map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields = Combine(
		u.batchUSWstat(s.Stat.Sw),
		map[string]interface{}{
			"guest-num_sta": s.GuestNumSta.Val,
			"ip":            s.IP,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.

	if s.Stat.Ap == nil {
		return // we're done now. the following code process UDM (non-pro) UAP data.
	}

	// Access-point portion — sent to the shared "uap" table.
	tags = map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields = u.processUAPstats(s.Stat.Ap)
	fields["ip"] = s.IP
	fields["bytes"] = s.Bytes.Val
	fields["last_seen"] = s.LastSeen.Val
	fields["rx_bytes"] = s.RxBytes.Val
	fields["tx_bytes"] = s.TxBytes.Val
	fields["uptime"] = s.Uptime.Val
	// NOTE(review): the gateway point above stores s.State.Val, but this
	// one stores s.State directly — confirm whether that is intentional.
	fields["state"] = s.State
	fields["user-num_sta"] = int(s.UserNumSta.Val)
	fields["guest-num_sta"] = int(s.GuestNumSta.Val)
	fields["num_sta"] = s.NumSta.Val

	r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
	u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
	u.processVAPTable(r, tags, *s.VapTable)
}
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// usgT is used as a name for printed/logged counters.
const usgT = item("USG")

// batchUSG generates Unifi Gateway datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUSG(r report, s *unifi.USG) {
	if !s.Adopted.Val || s.Locating.Val {
		return // skip unadopted devices and ones in locate (flashing-LED) mode.
	}

	tags := map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields := Combine(
		u.batchUDMtemps(s.Temperatures),
		u.batchSysStats(s.SysStats, s.SystemStats),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		map[string]interface{}{
			"ip":            s.IP,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"license_state": s.LicenseState,
			"guest-num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user-num_sta":  s.UserNumSta.Val,
			"version":       s.Version,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
			// "speedtest_rundate": time.Unix(int64(s.SpeedtestStatus.Rundate.Val), 0).String(),
		},
	)

	r.addCount(usgT)
	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
}
|
||||
|
||||
// batchUSGstats flattens gateway LAN counters, uplink info, and the last
// speed-test result into a field map. Returns an empty map when the
// controller provided no gateway stats (nil), so callers can Combine the
// result unconditionally.
func (u *InfluxUnifi) batchUSGstats(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]interface{} {
	if gw == nil {
		return map[string]interface{}{}
	}

	return map[string]interface{}{
		"uplink_latency":                 ul.Latency.Val,
		"uplink_speed":                   ul.Speed.Val,
		"speedtest-status_latency":       ss.Latency.Val,
		"speedtest-status_runtime":       ss.Runtime.Val,
		"speedtest-status_rundate":       ss.Rundate.Val,
		"speedtest-status_ping":          ss.StatusPing.Val,
		"speedtest-status_xput_download": ss.XputDownload.Val,
		"speedtest-status_xput_upload":   ss.XputUpload.Val,
		"lan-rx_bytes":                   gw.LanRxBytes.Val,
		"lan-rx_packets":                 gw.LanRxPackets.Val,
		"lan-tx_bytes":                   gw.LanTxBytes.Val,
		"lan-tx_packets":                 gw.LanTxPackets.Val,
		"lan-rx_dropped":                 gw.LanRxDropped.Val,
	}
}
|
||||
|
||||
// batchUSGwans emits one "usg_wan_ports" point per WAN interface that is
// currently up. Down interfaces are skipped entirely.
func (u *InfluxUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
	for _, wan := range wans {
		if !wan.Up.Val {
			continue
		}

		// Deliberately shadows the tags parameter: device identity is
		// copied in, then per-WAN tags are added.
		tags := map[string]string{
			"device_name": tags["name"],
			"site_name":   tags["site_name"],
			"source":      tags["source"],
			"ip":          wan.IP,
			"purpose":     wan.Name,
			"mac":         wan.Mac,
			"ifname":      wan.Ifname,
			"type":        wan.Type,
			"up":          wan.Up.Txt,
			"enabled":     wan.Enable.Txt,
		}
		fields := map[string]interface{}{
			"bytes-r":      wan.BytesR.Val,
			"full_duplex":  wan.FullDuplex.Val,
			"gateway":      wan.Gateway,
			"max_speed":    wan.MaxSpeed.Val,
			"rx_bytes":     wan.RxBytes.Val,
			"rx_bytes-r":   wan.RxBytesR.Val,
			"rx_dropped":   wan.RxDropped.Val,
			"rx_errors":    wan.RxErrors.Val,
			"rx_broadcast": wan.RxBroadcast.Val,
			"rx_multicast": wan.RxMulticast.Val,
			"rx_packets":   wan.RxPackets.Val,
			"speed":        wan.Speed.Val,
			"tx_bytes":     wan.TxBytes.Val,
			"tx_bytes-r":   wan.TxBytesR.Val,
			"tx_dropped":   wan.TxDropped.Val,
			"tx_errors":    wan.TxErrors.Val,
			"tx_packets":   wan.TxPackets.Val,
			"tx_broadcast": wan.TxBroadcast.Val,
			"tx_multicast": wan.TxMulticast.Val,
		}

		r.send(&metric{Table: "usg_wan_ports", Tags: tags, Fields: fields})
	}
}
|
||||
|
||||
// batchNetTable emits one "usg_networks" point per configured network
// (LAN/VLAN) on a gateway device.
func (u *InfluxUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
	for _, p := range nt {
		// Deliberately shadows the tags parameter: device identity is
		// copied in, then per-network tags are added.
		tags := map[string]string{
			"device_name": tags["name"],
			"site_name":   tags["site_name"],
			"source":      tags["source"],
			"up":          p.Up.Txt,
			"enabled":     p.Enabled.Txt,
			"ip":          p.IP,
			"mac":         p.Mac,
			"name":        p.Name,
			"domain_name": p.DomainName,
			"purpose":     p.Purpose,
			"is_guest":    p.IsGuest.Txt,
		}
		fields := map[string]interface{}{
			"num_sta":    p.NumSta.Val,
			"rx_bytes":   p.RxBytes.Val,
			"rx_packets": p.RxPackets.Val,
			"tx_bytes":   p.TxBytes.Val,
			"tx_packets": p.TxPackets.Val,
		}

		r.send(&metric{Table: "usg_networks", Tags: tags, Fields: fields})
	}
}
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uswT is used as a name for printed/logged counters.
const uswT = item("USW")

// batchUSW generates Unifi Switch datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUSW(r report, s *unifi.USW) {
	if !s.Adopted.Val || s.Locating.Val {
		return // skip unadopted devices and ones in locate (flashing-LED) mode.
	}

	tags := map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields := Combine(
		u.batchUSWstat(s.Stat.Sw),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]interface{}{
			"guest-num_sta":       s.GuestNumSta.Val,
			"ip":                  s.IP,
			"bytes":               s.Bytes.Val,
			"fan_level":           s.FanLevel.Val,
			"general_temperature": s.GeneralTemperature.Val,
			"last_seen":           s.LastSeen.Val,
			"rx_bytes":            s.RxBytes.Val,
			"tx_bytes":            s.TxBytes.Val,
			"uptime":              s.Uptime.Val,
			"state":               s.State.Val,
			"user-num_sta":        s.UserNumSta.Val,
		})

	r.addCount(uswT)
	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
	u.batchPortTable(r, tags, s.PortTable)
}
|
||||
|
||||
func (u *InfluxUnifi) batchUSWstat(sw *unifi.Sw) map[string]interface{} {
|
||||
if sw == nil {
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"stat_bytes": sw.Bytes.Val,
|
||||
"stat_rx_bytes": sw.RxBytes.Val,
|
||||
"stat_rx_crypts": sw.RxCrypts.Val,
|
||||
"stat_rx_dropped": sw.RxDropped.Val,
|
||||
"stat_rx_errors": sw.RxErrors.Val,
|
||||
"stat_rx_frags": sw.RxFrags.Val,
|
||||
"stat_rx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_bytes": sw.TxBytes.Val,
|
||||
"stat_tx_dropped": sw.TxDropped.Val,
|
||||
"stat_tx_errors": sw.TxErrors.Val,
|
||||
"stat_tx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_retries": sw.TxRetries.Val,
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:funlen
// batchPortTable emits one "usw_ports" point per switch port. Unless the
// DeadPorts option is set, only ports that are up and enabled are kept.
// PoE and SFP readings are added only when the port reports that hardware.
func (u *InfluxUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
	for _, p := range pt {
		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
			continue // only record UP ports.
		}

		tags := map[string]string{
			"site_name":      t["site_name"],
			"device_name":    t["name"],
			"source":         t["source"],
			"type":           t["type"],
			"name":           p.Name,
			"poe_mode":       p.PoeMode,
			"port_poe":       p.PortPoe.Txt,
			"port_idx":       p.PortIdx.Txt,
			"port_id":        t["name"] + " Port " + p.PortIdx.Txt,
			"poe_enable":     p.PoeEnable.Txt,
			"flowctrl_rx":    p.FlowctrlRx.Txt,
			"flowctrl_tx":    p.FlowctrlTx.Txt,
			"media":          p.Media,
			"has_sfp":        p.SFPFound.Txt,
			"sfp_compliance": p.SFPCompliance,
			"sfp_serial":     p.SFPSerial,
			"sfp_vendor":     p.SFPVendor,
			"sfp_part":       p.SFPPart,
		}
		fields := map[string]interface{}{
			"dbytes_r":     p.BytesR.Val,
			"rx_broadcast": p.RxBroadcast.Val,
			"rx_bytes":     p.RxBytes.Val,
			"rx_bytes-r":   p.RxBytesR.Val,
			"rx_dropped":   p.RxDropped.Val,
			"rx_errors":    p.RxErrors.Val,
			"rx_multicast": p.RxMulticast.Val,
			"rx_packets":   p.RxPackets.Val,
			"speed":        p.Speed.Val,
			"stp_pathcost": p.StpPathcost.Val,
			"tx_broadcast": p.TxBroadcast.Val,
			"tx_bytes":     p.TxBytes.Val,
			"tx_bytes-r":   p.TxBytesR.Val,
			"tx_dropped":   p.TxDropped.Val,
			"tx_errors":    p.TxErrors.Val,
			"tx_multicast": p.TxMulticast.Val,
			"tx_packets":   p.TxPackets.Val,
		}

		// PoE readings only exist on PoE-capable, PoE-enabled ports.
		if p.PoeEnable.Val && p.PortPoe.Val {
			fields["poe_current"] = p.PoeCurrent.Val
			fields["poe_power"] = p.PoePower.Val
			fields["poe_voltage"] = p.PoeVoltage.Val
		}

		// SFP module diagnostics only exist when a module is inserted.
		if p.SFPFound.Val {
			fields["sfp_current"] = p.SFPCurrent.Val
			fields["sfp_voltage"] = p.SFPVoltage.Val
			fields["sfp_temperature"] = p.SFPTemperature.Val
			fields["sfp_txpower"] = p.SFPTxpower.Val
			fields["sfp_rxpower"] = p.SFPRxpower.Val
		}

		r.send(&metric{Table: "usw_ports", Tags: tags, Fields: fields})
	}
}
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
package influxunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uxgT is used as a name for printed/logged counters.
const uxgT = item("UXG")

// batchUXG generates 10Gb Unifi Gateway datapoints for InfluxDB.
// These points can be passed directly to influx.
// Like the UDM, a UXG embeds both a gateway and a switch, so one device
// record emits points to both the "usg" and "usw" tables.
func (u *InfluxUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
	if !s.Adopted.Val || s.Locating.Val {
		return // skip unadopted devices and ones in locate (flashing-LED) mode.
	}

	// Gateway portion — sent to the shared "usg" table.
	tags := map[string]string{
		"source":    s.SourceName,
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields := Combine(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]interface{}{
			"source":        s.SourceName,
			"ip":            s.IP,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"license_state": s.LicenseState,
			"guest-num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user-num_sta":  s.UserNumSta.Val,
			"version":       s.Version,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(uxgT)
	r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Switch portion — sent to the shared "usw" table.
	tags = map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
	}
	fields = Combine(
		u.batchUSWstat(s.Stat.Sw),
		map[string]interface{}{
			"guest-num_sta": s.GuestNumSta.Val,
			"ip":            s.IP,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT LICENSE.
|
||||
Copyright (c) 2018-2021 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# inputunifi
|
||||
|
||||
## UnPoller Input Plugin
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
package inputunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
/* Event collection. Events are also sent to the webserver for display. */
|
||||
|
||||
func (u *InputUnifi) collectControllerEvents(c *Controller) ([]interface{}, error) {
|
||||
if u.isNill(c) {
|
||||
u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
|
||||
|
||||
if err := u.getUnifi(c); err != nil {
|
||||
return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
logs = []interface{}{}
|
||||
newLogs []interface{}
|
||||
)
|
||||
|
||||
// Get the sites we care about.
|
||||
sites, err := u.getFilteredSites(c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unifi.GetSites(): %w", err)
|
||||
}
|
||||
|
||||
type caller func([]interface{}, []*unifi.Site, *Controller) ([]interface{}, error)
|
||||
|
||||
for _, call := range []caller{u.collectIDS, u.collectAnomalies, u.collectAlarms, u.collectEvents} {
|
||||
if newLogs, err = call(logs, sites, c); err != nil {
|
||||
return logs, err
|
||||
}
|
||||
|
||||
logs = append(logs, newLogs...)
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (u *InputUnifi) collectAlarms(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
|
||||
if *c.SaveAlarms {
|
||||
for _, s := range sites {
|
||||
events, err := c.Unifi.GetAlarmsSite(s)
|
||||
if err != nil {
|
||||
return logs, fmt.Errorf("unifi.GetAlarms(): %w", err)
|
||||
}
|
||||
|
||||
for _, e := range events {
|
||||
logs = append(logs, e)
|
||||
|
||||
webserver.NewInputEvent(PluginName, s.ID+"_alarms", &webserver.Event{
|
||||
Ts: e.Datetime, Msg: e.Msg, Tags: map[string]string{
|
||||
"type": "alarm", "key": e.Key, "site_id": e.SiteID,
|
||||
"site_name": e.SiteName, "source": e.SourceName,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (u *InputUnifi) collectAnomalies(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
|
||||
if *c.SaveAnomal {
|
||||
for _, s := range sites {
|
||||
events, err := c.Unifi.GetAnomaliesSite(s)
|
||||
if err != nil {
|
||||
return logs, fmt.Errorf("unifi.GetAnomalies(): %w", err)
|
||||
}
|
||||
|
||||
for _, e := range events {
|
||||
logs = append(logs, e)
|
||||
|
||||
webserver.NewInputEvent(PluginName, s.ID+"_anomalies", &webserver.Event{
|
||||
Ts: e.Datetime, Msg: e.Anomaly, Tags: map[string]string{
|
||||
"type": "anomaly", "site_name": e.SiteName, "source": e.SourceName,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// collectEvents pulls the last hour of site events when event saving is
// enabled. Each event is PII-redacted (per c.HashPII) before being appended
// to logs and forwarded to the built-in web server.
func (u *InputUnifi) collectEvents(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
	if *c.SaveEvents {
		for _, s := range sites {
			events, err := c.Unifi.GetSiteEvents(s, time.Hour)
			if err != nil {
				return logs, fmt.Errorf("unifi.GetEvents(): %w", err)
			}

			for _, e := range events {
				// Shadow e with the redacted event so everything below
				// (storage and webserver) sees only the redacted copy.
				e := redactEvent(e, c.HashPII)
				logs = append(logs, e)

				webserver.NewInputEvent(PluginName, s.ID+"_events", &webserver.Event{
					Msg: e.Msg, Ts: e.Datetime, Tags: map[string]string{
						"type": "event", "key": e.Key, "site_id": e.SiteID,
						"site_name": e.SiteName, "source": e.SourceName,
					},
				})
			}
		}
	}

	return logs, nil
}
|
||||
|
||||
func (u *InputUnifi) collectIDS(logs []interface{}, sites []*unifi.Site, c *Controller) ([]interface{}, error) {
|
||||
if *c.SaveIDS {
|
||||
for _, s := range sites {
|
||||
events, err := c.Unifi.GetIDSSite(s)
|
||||
if err != nil {
|
||||
return logs, fmt.Errorf("unifi.GetIDS(): %w", err)
|
||||
}
|
||||
|
||||
for _, e := range events {
|
||||
logs = append(logs, e)
|
||||
|
||||
webserver.NewInputEvent(PluginName, s.ID+"_ids", &webserver.Event{
|
||||
Ts: e.Datetime, Msg: e.Msg, Tags: map[string]string{
|
||||
"type": "ids", "key": e.Key, "site_id": e.SiteID,
|
||||
"site_name": e.SiteName, "source": e.SourceName,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// redactEvent attempts to mask personally identifying information from log messages.
// This currently misses the "msg" value entirely and leaks PII information.
// NOTE: the event is modified in place and also returned for convenience.
func redactEvent(e *unifi.Event, hash *bool) *unifi.Event {
	// Redaction disabled: pass the event through untouched.
	if !*hash {
		return e
	}

	// metrics.Events[i].Msg <-- not sure what to do here.
	// Geo data is dropped entirely; host names and MACs become hashes.
	e.DestIPGeo = unifi.IPGeo{}
	e.SourceIPGeo = unifi.IPGeo{}
	e.Host = RedactNamePII(e.Host, hash)
	e.Hostname = RedactNamePII(e.Hostname, hash)
	e.DstMAC = RedactMacPII(e.DstMAC, hash)
	e.SrcMAC = RedactMacPII(e.SrcMAC, hash)

	return e
}
|
||||
|
|
@ -0,0 +1,269 @@
|
|||
package inputunifi
|
||||
|
||||
// nolint: gosec
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
var ErrScrapeFilterMatchFailed = fmt.Errorf("scrape filter match failed, and filter is not http URL")
|
||||
|
||||
// isNill reports whether the controller has no authenticated session, which
// signals callers to (re-)authenticate via getUnifi. (The misspelled name is
// kept for compatibility with existing callers.)
func (u *InputUnifi) isNill(c *Controller) bool {
	// Read lock: c.Unifi is replaced by getUnifi while holding a write lock.
	u.RLock()
	defer u.RUnlock()

	return c.Unifi == nil
}
|
||||
|
||||
// newDynamicCntrlr creates and saves a controller definition for further use.
// This is called when an unconfigured controller is requested. The boolean
// return is true when a brand-new controller was created (it still needs to
// authenticate); false means the URL was already known.
func (u *InputUnifi) newDynamicCntrlr(url string) (bool, *Controller) {
	// Write lock: guards the u.dynamic map against concurrent scrapes.
	u.Lock()
	defer u.Unlock()

	if c := u.dynamic[url]; c != nil {
		// it already exists.
		return false, c
	}

	ccopy := u.Default // copy defaults into new controller
	u.dynamic[url] = &ccopy
	u.dynamic[url].URL = url

	return true, u.dynamic[url]
}
|
||||
|
||||
// dynamicController services a scrape request for a controller that is not in
// the config file. The filter path must look like an http(s) URL; an unknown
// URL gets a new controller entry (seeded from defaults) and is authenticated
// on first use, then polled like any configured controller.
func (u *InputUnifi) dynamicController(filter *poller.Filter) (*poller.Metrics, error) {
	if !strings.HasPrefix(filter.Path, "http") {
		return nil, ErrScrapeFilterMatchFailed
	}

	newCntrlr, c := u.newDynamicCntrlr(filter.Path)

	if newCntrlr {
		u.Logf("Authenticating to Dynamic UniFi Controller: %s", filter.Path)

		if err := u.getUnifi(c); err != nil {
			// Log the controller config even on failure to aid debugging.
			u.logController(c)
			return nil, fmt.Errorf("authenticating to %s: %w", filter.Path, err)
		}

		u.logController(c)
	}

	return u.collectController(c)
}
|
||||
|
||||
// collectController polls one controller for metrics. It re-authenticates
// before polling when the session is nil, and re-authenticates once more
// after a failed poll. The post-failure re-auth does NOT retry the poll: the
// original poll error (and any partial metrics) is returned, and the next
// scheduled poll benefits from the fresh session.
func (u *InputUnifi) collectController(c *Controller) (*poller.Metrics, error) {
	if u.isNill(c) {
		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)

		if err := u.getUnifi(c); err != nil {
			return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
		}
	}

	metrics, err := u.pollController(c)
	if err != nil {
		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)

		// A poll failure usually means the session expired; log in again.
		if err := u.getUnifi(c); err != nil {
			return metrics, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
		}
	}

	return metrics, err
}
|
||||
|
||||
// pollController fetches one controller's data: the filtered site list,
// optional rogue-AP and DPI tables, then clients and devices. The built-in
// web interface is updated (via the deferred updateWeb) with whatever was
// collected, even when an error causes an early return.
//nolint:cyclop
func (u *InputUnifi) pollController(c *Controller) (*poller.Metrics, error) {
	u.RLock()
	defer u.RUnlock()

	// Get the sites we care about.
	sites, err := u.getFilteredSites(c)
	if err != nil {
		return nil, fmt.Errorf("unifi.GetSites(): %w", err)
	}

	m := &Metrics{TS: time.Now(), Sites: sites}
	defer updateWeb(c, m)

	// Rogue-AP and DPI data are optional and off by default.
	if c.SaveRogue != nil && *c.SaveRogue {
		if m.RogueAPs, err = c.Unifi.GetRogueAPs(sites); err != nil {
			return nil, fmt.Errorf("unifi.GetRogueAPs(%s): %w", c.URL, err)
		}
	}

	if c.SaveDPI != nil && *c.SaveDPI {
		if m.SitesDPI, err = c.Unifi.GetSiteDPI(sites); err != nil {
			return nil, fmt.Errorf("unifi.GetSiteDPI(%s): %w", c.URL, err)
		}

		if m.ClientsDPI, err = c.Unifi.GetClientsDPI(sites); err != nil {
			return nil, fmt.Errorf("unifi.GetClientsDPI(%s): %w", c.URL, err)
		}
	}

	// Get all the points.
	if m.Clients, err = c.Unifi.GetClients(sites); err != nil {
		return nil, fmt.Errorf("unifi.GetClients(%s): %w", c.URL, err)
	}

	if m.Devices, err = c.Unifi.GetDevices(sites); err != nil {
		return nil, fmt.Errorf("unifi.GetDevices(%s): %w", c.URL, err)
	}

	return u.augmentMetrics(c, m), nil
}
|
||||
|
||||
// augmentMetrics is our middleware layer between collecting metrics and writing them.
|
||||
// This is where we can manipuate the returned data or make arbitrary decisions.
|
||||
// This method currently adds parent device names to client metrics and hashes PII.
|
||||
// This method also converts our local *Metrics type into a slice of interfaces for poller.
|
||||
func (u *InputUnifi) augmentMetrics(c *Controller, metrics *Metrics) *poller.Metrics {
|
||||
if metrics == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
m, devices, bssdIDs := extractDevices(metrics)
|
||||
|
||||
// These come blank, so set them here.
|
||||
for _, client := range metrics.Clients {
|
||||
if devices[client.Mac] = client.Name; client.Name == "" {
|
||||
devices[client.Mac] = client.Hostname
|
||||
}
|
||||
|
||||
client.Mac = RedactMacPII(client.Mac, c.HashPII)
|
||||
client.Name = RedactNamePII(client.Name, c.HashPII)
|
||||
client.Hostname = RedactNamePII(client.Hostname, c.HashPII)
|
||||
client.SwName = devices[client.SwMac]
|
||||
client.ApName = devices[client.ApMac]
|
||||
client.GwName = devices[client.GwMac]
|
||||
client.RadioDescription = bssdIDs[client.Bssid] + client.RadioProto
|
||||
m.Clients = append(m.Clients, client)
|
||||
}
|
||||
|
||||
for _, client := range metrics.ClientsDPI {
|
||||
// Name on Client DPI data also comes blank, find it based on MAC address.
|
||||
client.Name = devices[client.MAC]
|
||||
if client.Name == "" {
|
||||
client.Name = client.MAC
|
||||
}
|
||||
|
||||
client.Name = RedactNamePII(client.Name, c.HashPII)
|
||||
client.MAC = RedactMacPII(client.MAC, c.HashPII)
|
||||
m.ClientsDPI = append(m.ClientsDPI, client)
|
||||
}
|
||||
|
||||
for _, ap := range metrics.RogueAPs {
|
||||
// XXX: do we need augment this data?
|
||||
m.RogueAPs = append(m.RogueAPs, ap)
|
||||
}
|
||||
|
||||
if *c.SaveSites {
|
||||
for _, site := range metrics.Sites {
|
||||
m.Sites = append(m.Sites, site)
|
||||
}
|
||||
|
||||
for _, site := range metrics.SitesDPI {
|
||||
m.SitesDPI = append(m.SitesDPI, site)
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// this is a helper function for augmentMetrics.
// extractDevices flattens every device list into the poller metrics and
// builds two lookup tables: device MAC -> device name, and VAP BSSID -> a
// radio-description prefix used for client metrics.
// NOTE(review): assumes metrics.Devices is non-nil — pollController always
// sets it via GetDevices; confirm before adding other callers.
func extractDevices(metrics *Metrics) (*poller.Metrics, map[string]string, map[string]string) {
	m := &poller.Metrics{TS: metrics.TS}
	devices := make(map[string]string)
	bssdIDs := make(map[string]string)

	for _, r := range metrics.Devices.UAPs {
		devices[r.Mac] = r.Name
		m.Devices = append(m.Devices, r)

		// Map each access point's VAPs so clients can show which radio they use.
		for _, v := range r.VapTable {
			bssdIDs[v.Bssid] = fmt.Sprintf("%s %s %s:", r.Name, v.Radio, v.RadioName)
		}
	}

	for _, r := range metrics.Devices.USGs {
		devices[r.Mac] = r.Name
		m.Devices = append(m.Devices, r)
	}

	for _, r := range metrics.Devices.USWs {
		devices[r.Mac] = r.Name
		m.Devices = append(m.Devices, r)
	}

	for _, r := range metrics.Devices.UDMs {
		devices[r.Mac] = r.Name
		m.Devices = append(m.Devices, r)
	}

	for _, r := range metrics.Devices.UXGs {
		devices[r.Mac] = r.Name
		m.Devices = append(m.Devices, r)
	}

	return m, devices, bssdIDs
}
|
||||
|
||||
// RedactNamePII converts a name string to an md5 hash (first 24 chars only).
// Useful for masking out personally identifying information. The input is
// returned unchanged when hashing is disabled or the string is empty.
func RedactNamePII(pii string, hash *bool) string {
	if hash == nil || !*hash || pii == "" {
		return pii
	}

	sum := md5.Sum([]byte(pii)) // nolint: gosec

	// instead of 32 hex characters, only use 24.
	return fmt.Sprintf("%x", sum)[:24]
}
|
||||
|
||||
// RedactMacPII converts a MAC address to an md5 hashed version (first 14 chars only).
// Useful for masking out personally identifying information. The input is
// returned unchanged when hashing is disabled or the string is empty.
func RedactMacPII(pii string, hash *bool) (output string) {
	if hash == nil || !*hash || pii == "" {
		return pii
	}

	hexed := fmt.Sprintf("%x", md5.Sum([]byte(pii))) // nolint: gosec

	// Reassemble the first 14 hex chars into a "fake" mac-address-like string.
	pairs := make([]string, 0, 7)
	for i := 0; i < 14; i += 2 {
		pairs = append(pairs, hexed[i:i+2])
	}

	return strings.Join(pairs, ":")
}
|
||||
|
||||
// getFilteredSites returns a list of sites to fetch data for.
// Omits requested but unconfigured sites. Grabs the full list from the
// controller and returns the sites provided in the config file.
// An empty config list, or a list containing "all", returns every site.
func (u *InputUnifi) getFilteredSites(c *Controller) ([]*unifi.Site, error) {
	u.RLock()
	defer u.RUnlock()

	sites, err := c.Unifi.GetSites()
	if err != nil {
		return nil, fmt.Errorf("controller: %w", err)
	} else if len(c.Sites) == 0 || StringInSlice("all", c.Sites) {
		return sites, nil
	}

	i := 0

	// Filter in place: keep configured sites only, preserving order.
	for _, s := range sites {
		// Only include valid sites in the request filter.
		if StringInSlice(s.Name, c.Sites) {
			sites[i] = s
			i++
		}
	}

	return sites[:i], nil
}
|
||||
|
|
@ -0,0 +1,339 @@
|
|||
// Package inputunifi implements the poller.Input interface and bridges the gap between
|
||||
// metrics from the unifi library, and the augments required to pump them into unifi-poller.
|
||||
package inputunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
// PluginName is the name of this input plugin.
const PluginName = "unifi"

// Default connection settings, used when the config file omits them.
const (
	defaultURL  = "https://127.0.0.1:8443"
	defaultUser = "unifipoller"
	defaultPass = "unifipoller"
	defaultSite = "all"
)
|
||||
|
||||
// InputUnifi contains the running data.
type InputUnifi struct {
	*Config `json:"unifi" toml:"unifi" xml:"unifi" yaml:"unifi"`
	// dynamic holds controllers discovered at scrape time; see dynamicController.
	dynamic    map[string]*Controller
	sync.Mutex // to lock the map above.
	Logger     poller.Logger
}
|
||||
|
||||
// Controller represents the configuration for a UniFi Controller.
// Each polled controller may have its own configuration.
// Nil *bool fields are filled in by setDefaults / setControllerDefaults.
type Controller struct {
	VerifySSL  *bool        `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
	SaveAnomal *bool        `json:"save_anomalies" toml:"save_anomalies" xml:"save_anomalies" yaml:"save_anomalies"`
	SaveAlarms *bool        `json:"save_alarms" toml:"save_alarms" xml:"save_alarms" yaml:"save_alarms"`
	SaveEvents *bool        `json:"save_events" toml:"save_events" xml:"save_events" yaml:"save_events"`
	SaveIDS    *bool        `json:"save_ids" toml:"save_ids" xml:"save_ids" yaml:"save_ids"`
	SaveDPI    *bool        `json:"save_dpi" toml:"save_dpi" xml:"save_dpi" yaml:"save_dpi"`
	SaveRogue  *bool        `json:"save_rogue" toml:"save_rogue" xml:"save_rogue" yaml:"save_rogue"`
	HashPII    *bool        `json:"hash_pii" toml:"hash_pii" xml:"hash_pii" yaml:"hash_pii"`
	SaveSites  *bool        `json:"save_sites" toml:"save_sites" xml:"save_sites" yaml:"save_sites"`
	CertPaths  []string     `json:"ssl_cert_paths" toml:"ssl_cert_paths" xml:"ssl_cert_path" yaml:"ssl_cert_paths"`
	User       string       `json:"user" toml:"user" xml:"user" yaml:"user"`
	// Pass may be a literal password or "file://path" to read it from disk.
	Pass       string       `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
	URL        string       `json:"url" toml:"url" xml:"url" yaml:"url"`
	Sites      []string     `json:"sites" toml:"sites" xml:"site" yaml:"sites"`
	// Unifi is the live API session; nil until getUnifi succeeds.
	Unifi      *unifi.Unifi `json:"-" toml:"-" xml:"-" yaml:"-"`
	ID         string       `json:"id,omitempty"` // this is an output, not an input.
}
|
||||
|
||||
// Config contains our configuration data.
type Config struct {
	sync.RWMutex // locks the Unifi struct member when re-authing to unifi.
	// Default supplies fallback values for controllers and seeds dynamic ones.
	Default     Controller    `json:"defaults" toml:"defaults" xml:"default" yaml:"defaults"`
	Disable     bool          `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
	// Dynamic enables on-the-fly polling of unconfigured controllers by URL.
	Dynamic     bool          `json:"dynamic" toml:"dynamic" xml:"dynamic,attr" yaml:"dynamic"`
	Controllers []*Controller `json:"controllers" toml:"controller" xml:"controller" yaml:"controllers"`
}
|
||||
|
||||
// Metrics is simply a useful container for everything collected from one
// controller during a single poll; TS is the poll timestamp.
type Metrics struct {
	TS         time.Time
	Sites      []*unifi.Site
	Clients    []*unifi.Client
	SitesDPI   []*unifi.DPITable
	ClientsDPI []*unifi.DPITable
	RogueAPs   []*unifi.RogueAP
	Devices    *unifi.Devices
}
|
||||
|
||||
// init registers this package as a poller input plugin at program start.
func init() { // nolint: gochecknoinits
	u := &InputUnifi{
		dynamic: make(map[string]*Controller),
	}

	poller.NewInput(&poller.InputPlugin{
		Name:   PluginName,
		Input:  u, // this library implements poller.Input interface for Metrics().
		Config: u, // Defines our config data interface.
	})
}
|
||||
|
||||
// getCerts reads in cert files from disk and stores them as a slice of of byte slices.
|
||||
func (c *Controller) getCerts() ([][]byte, error) {
|
||||
if len(c.CertPaths) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b := make([][]byte, len(c.CertPaths))
|
||||
|
||||
for i, f := range c.CertPaths {
|
||||
d, err := os.ReadFile(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading SSL cert file: %w", err)
|
||||
}
|
||||
|
||||
b[i] = d
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// getUnifi (re-)authenticates to a unifi controller.
// If certificate files are provided, they are re-read.
// NOTE(review): u.Lock() resolves to InputUnifi's embedded sync.Mutex (the
// shallower field), not the Config RWMutex that isNill/pollController
// read-lock — confirm this is the intended lock.
func (u *InputUnifi) getUnifi(c *Controller) error {
	u.Lock()
	defer u.Unlock()

	if c.Unifi != nil {
		// Drop idle keep-alive connections from any previous session.
		c.Unifi.CloseIdleConnections()
	}

	// Re-read certificate files from disk on every (re-)auth.
	certs, err := c.getCerts()
	if err != nil {
		return err
	}

	// Create an authenticated session to the Unifi Controller.
	c.Unifi, err = unifi.NewUnifi(&unifi.Config{
		User:      c.User,
		Pass:      c.Pass,
		URL:       c.URL,
		SSLCert:   certs,
		VerifySSL: *c.VerifySSL,
		ErrorLog:  u.LogErrorf, // Log all errors.
		DebugLog:  u.LogDebugf, // Log debug messages.
	})
	if err != nil {
		c.Unifi = nil // leave a nil client so isNill() triggers a retry later.
		return fmt.Errorf("unifi controller: %w", err)
	}

	u.LogDebugf("Authenticated with controller successfully, %s", c.URL)

	return nil
}
|
||||
|
||||
// checkSites makes sure the list of provided sites exists on the controller.
// This only runs once during initialization. Sites that are configured but
// missing on the controller are logged and dropped; an empty result (or an
// explicit "all") resets the list to poll every site.
func (u *InputUnifi) checkSites(c *Controller) error {
	u.RLock()
	defer u.RUnlock()

	// Treat an empty or blank-first-entry list as "all sites".
	if len(c.Sites) == 0 || c.Sites[0] == "" {
		c.Sites = []string{"all"}
	}

	u.LogDebugf("Checking Controller Sites List")

	sites, err := c.Unifi.GetSites()
	if err != nil {
		return fmt.Errorf("controller: %w", err)
	}

	// Build a human-readable summary of every site the controller knows.
	msg := []string{}
	for _, site := range sites {
		msg = append(msg, site.Name+" ("+site.Desc+")")
	}

	u.Logf("Found %d site(s) on controller %s: %v", len(msg), c.URL, strings.Join(msg, ", "))

	if StringInSlice("all", c.Sites) {
		c.Sites = []string{"all"}
		return nil
	}

	keep := []string{}

	// Keep each configured site that exists on the controller; log the rest.
FIRST:
	for _, s := range c.Sites {
		for _, site := range sites {
			if s == site.Name {
				keep = append(keep, s)
				continue FIRST
			}
		}
		u.LogErrorf("Configured site not found on controller %s: %v", c.URL, s)
	}

	// If nothing matched, fall back to polling every site.
	if c.Sites = keep; len(keep) == 0 {
		c.Sites = []string{"all"}
	}

	return nil
}
|
||||
|
||||
// getPassFromFile reads a controller password from the named file, trimming
// surrounding whitespace. A read error is logged (not returned) and yields an
// empty string, which lets the caller fall back to a default password.
func (u *InputUnifi) getPassFromFile(filename string) string {
	b, err := os.ReadFile(filename)
	if err != nil {
		u.LogErrorf("Reading UniFi Password File: %v", err)
	}

	return strings.TrimSpace(string(b))
}
|
||||
|
||||
// setDefaults sets the default defaults.
// It fills every nil/empty field of c with the package's hard-coded defaults,
// and resolves a "file://" password reference. Called on u.Default during
// Initialize; per-controller values come from setControllerDefaults instead.
func (u *InputUnifi) setDefaults(c *Controller) { //nolint:cyclop
	// Shared pointer targets for the boolean defaults below.
	t := true
	f := false

	// Default defaults.
	if c.SaveSites == nil {
		c.SaveSites = &t
	}

	if c.VerifySSL == nil {
		c.VerifySSL = &f
	}

	if c.HashPII == nil {
		c.HashPII = &f
	}

	if c.SaveDPI == nil {
		c.SaveDPI = &f
	}

	if c.SaveRogue == nil {
		c.SaveRogue = &f
	}

	if c.SaveIDS == nil {
		c.SaveIDS = &f
	}

	if c.SaveEvents == nil {
		c.SaveEvents = &f
	}

	if c.SaveAlarms == nil {
		c.SaveAlarms = &f
	}

	if c.SaveAnomal == nil {
		c.SaveAnomal = &f
	}

	if c.URL == "" {
		c.URL = defaultURL
	}

	// "file://path" means: read the password from that file.
	if strings.HasPrefix(c.Pass, "file://") {
		c.Pass = u.getPassFromFile(strings.TrimPrefix(c.Pass, "file://"))
	}

	if c.Pass == "" {
		c.Pass = defaultPass
	}

	if c.User == "" {
		c.User = defaultUser
	}

	if len(c.Sites) == 0 {
		c.Sites = []string{defaultSite}
	}
}
|
||||
|
||||
// setControllerDefaults sets defaults for the for controllers.
// Any missing values come from defaults (above): each nil/empty field is
// filled from u.Default, which Initialize has already run through
// setDefaults. Returns c for call-chaining convenience.
func (u *InputUnifi) setControllerDefaults(c *Controller) *Controller { //nolint:cyclop,funlen
	// Configured controller defaults.
	if c.SaveSites == nil {
		c.SaveSites = u.Default.SaveSites
	}

	if c.VerifySSL == nil {
		c.VerifySSL = u.Default.VerifySSL
	}

	if c.CertPaths == nil {
		c.CertPaths = u.Default.CertPaths
	}

	if c.HashPII == nil {
		c.HashPII = u.Default.HashPII
	}

	if c.SaveDPI == nil {
		c.SaveDPI = u.Default.SaveDPI
	}

	if c.SaveIDS == nil {
		c.SaveIDS = u.Default.SaveIDS
	}

	if c.SaveRogue == nil {
		c.SaveRogue = u.Default.SaveRogue
	}

	if c.SaveEvents == nil {
		c.SaveEvents = u.Default.SaveEvents
	}

	if c.SaveAlarms == nil {
		c.SaveAlarms = u.Default.SaveAlarms
	}

	if c.SaveAnomal == nil {
		c.SaveAnomal = u.Default.SaveAnomal
	}

	if c.URL == "" {
		c.URL = u.Default.URL
	}

	// "file://path" means: read the password from that file.
	if strings.HasPrefix(c.Pass, "file://") {
		c.Pass = u.getPassFromFile(strings.TrimPrefix(c.Pass, "file://"))
	}

	if c.Pass == "" {
		c.Pass = u.Default.Pass
	}

	if c.User == "" {
		c.User = u.Default.User
	}

	if len(c.Sites) == 0 {
		c.Sites = u.Default.Sites
	}

	return c
}
|
||||
|
||||
// StringInSlice returns true if a string is in a slice.
// Comparison is case-insensitive (ASCII and Unicode case folding).
func StringInSlice(str string, slice []string) bool {
	for i := 0; i < len(slice); i++ {
		if strings.EqualFold(slice[i], str) {
			return true
		}
	}

	return false
}
|
||||
|
|
@ -0,0 +1,202 @@
|
|||
package inputunifi
|
||||
|
||||
/* This file contains the three poller.Input interface methods. */
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDynamicLookupsDisabled = fmt.Errorf("filter path requested but dynamic lookups disabled")
|
||||
ErrControllerNumNotFound = fmt.Errorf("controller number not found")
|
||||
ErrNoFilterKindProvided = fmt.Errorf("must provide filter: devices, clients, other")
|
||||
)
|
||||
|
||||
// Initialize gets called one time when starting up.
// Satisfies poller.Input interface.
// It applies defaults, authenticates every configured controller (auth
// failures are logged, not fatal — polling retries later), validates their
// site lists, and publishes the sanitized config to the built-in web server.
func (u *InputUnifi) Initialize(l poller.Logger) error {
	if u.Config == nil {
		u.Config = &Config{Disable: true}
	}

	if u.Logger = l; u.Disable {
		u.Logf("UniFi input plugin disabled or missing configuration!")
		return nil
	}

	// With no controllers configured and dynamic mode off, poll the defaults.
	if u.setDefaults(&u.Default); len(u.Controllers) == 0 && !u.Dynamic {
		u.Controllers = []*Controller{&u.Default}
	}

	if len(u.Controllers) == 0 {
		u.Logf("No controllers configured. Polling dynamic controllers only! Defaults:")
		u.logController(&u.Default)
	}

	for i, c := range u.Controllers {
		if err := u.getUnifi(u.setControllerDefaults(c)); err != nil {
			// Not fatal: the poll loop re-authenticates on its own.
			u.LogErrorf("Controller %d of %d Auth or Connection Error, retrying: %v", i+1, len(u.Controllers), err)
			continue
		}

		if err := u.checkSites(c); err != nil {
			u.LogErrorf("checking sites on %s: %v", c.URL, err)
		}

		u.Logf("Configured UniFi Controller %d of %d:", i+1, len(u.Controllers))
		u.logController(c)
	}

	webserver.UpdateInput(&webserver.Input{Name: PluginName, Config: formatConfig(u.Config)})

	return nil
}
|
||||
|
||||
// logController prints a multi-line summary of one controller's settings.
// Callers must have run setDefaults/setControllerDefaults first: the *bool
// fields are dereferenced without nil checks here.
func (u *InputUnifi) logController(c *Controller) {
	u.Logf("   => URL: %s (verify SSL: %v)", c.URL, *c.VerifySSL)

	if len(c.CertPaths) > 0 {
		u.Logf("   => Cert Files: %s", strings.Join(c.CertPaths, ", "))
	}

	// Version/UUID are only known after a successful authentication.
	if c.Unifi != nil {
		u.Logf("   => Version: %s (%s)", c.Unifi.ServerVersion, c.Unifi.UUID)
	}

	u.Logf("   => Username: %s (has password: %v)", c.User, c.Pass != "")
	u.Logf("   => Hash PII / Poll Sites: %v / %s", *c.HashPII, strings.Join(c.Sites, ", "))
	u.Logf("   => Save Sites / Save DPI: %v / %v (metrics)", *c.SaveSites, *c.SaveDPI)
	u.Logf("   => Save Events / Save IDS: %v / %v (logs)", *c.SaveEvents, *c.SaveIDS)
	u.Logf("   => Save Alarms / Anomalies: %v / %v (logs)", *c.SaveAlarms, *c.SaveAnomal)
	u.Logf("   => Save Rogue APs: %v", *c.SaveRogue)
}
|
||||
|
||||
// Events allows you to pull only events (and IDS) from the UniFi Controller.
// This does not fully respect HashPII, but it may in the future!
// Use Filter.Path to pick a specific controller, otherwise poll them all!
// Returns (nil, nil) when the plugin is disabled.
func (u *InputUnifi) Events(filter *poller.Filter) (*poller.Events, error) {
	if u.Disable {
		return nil, nil
	}

	logs := []interface{}{}

	if filter == nil {
		filter = &poller.Filter{}
	}

	for _, c := range u.Controllers {
		// Skip controllers whose URL does not match a non-empty filter path.
		if filter.Path != "" && !strings.EqualFold(c.URL, filter.Path) {
			continue
		}

		events, err := u.collectControllerEvents(c)
		if err != nil {
			return nil, err
		}

		logs = append(logs, events...)
	}

	return &poller.Events{Logs: logs}, nil
}
|
||||
|
||||
// Metrics grabs all the measurements from a UniFi controller and returns them.
// Set Filter.Path to a controller URL for a specific controller (or get them all).
// A filter path that matches no configured controller (and yields no clients)
// falls through to the dynamic-controller lookup, if enabled.
func (u *InputUnifi) Metrics(filter *poller.Filter) (*poller.Metrics, error) {
	if u.Disable {
		return nil, nil
	}

	metrics := &poller.Metrics{}

	if filter == nil {
		filter = &poller.Filter{}
	}

	// Check if the request is for an existing, configured controller (or all controllers)
	for _, c := range u.Controllers {
		if filter.Path != "" && !strings.EqualFold(c.URL, filter.Path) {
			// continue only if we have a filter path and it doesn't match.
			continue
		}

		m, err := u.collectController(c)
		if err != nil {
			return metrics, err
		}

		metrics = poller.AppendMetrics(metrics, m)
	}

	// No filter, or the filter matched a configured controller: we're done.
	if filter.Path == "" || len(metrics.Clients) != 0 {
		return metrics, nil
	}

	if !u.Dynamic {
		return nil, ErrDynamicLookupsDisabled
	}

	// Attempt a dynamic metrics fetch from an unconfigured controller.
	return u.dynamicController(filter)
}
|
||||
|
||||
// RawMetrics returns API output from the first configured UniFi controller.
// Adjust filter.Unit to pull from a controller other than the first.
// filter.Kind chooses what to fetch: devices, clients, or ("other") a raw
// JSON path from filter.Path.
func (u *InputUnifi) RawMetrics(filter *poller.Filter) ([]byte, error) {
	if l := len(u.Controllers); filter.Unit >= l {
		return nil, fmt.Errorf("%d controller(s) configured, '%d': %w", l, filter.Unit, ErrControllerNumNotFound)
	}

	c := u.Controllers[filter.Unit]
	if u.isNill(c) {
		u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)

		if err := u.getUnifi(c); err != nil {
			return nil, fmt.Errorf("re-authenticating to %s: %w", c.URL, err)
		}
	}

	if err := u.checkSites(c); err != nil {
		return nil, err
	}

	sites, err := u.getFilteredSites(c)
	if err != nil {
		return nil, err
	}

	switch filter.Kind {
	case "d", "device", "devices":
		return u.getSitesJSON(c, unifi.APIDevicePath, sites)
	case "client", "clients", "c":
		return u.getSitesJSON(c, unifi.APIClientPath, sites)
	case "other", "o":
		return c.Unifi.GetJSON(filter.Path)
	default:
		return []byte{}, ErrNoFilterKindProvided
	}
}
|
||||
|
||||
// getSitesJSON fetches the given API path (a format string taking the site
// name) for every site and returns the concatenated raw JSON responses.
func (u *InputUnifi) getSitesJSON(c *Controller, path string, sites []*unifi.Site) ([]byte, error) {
	allJSON := []byte{}

	for _, s := range sites {
		apiPath := fmt.Sprintf(path, s.Name)
		u.LogDebugf("Returning Path '%s' for site: %s (%s):\n", apiPath, s.Desc, s.Name)

		body, err := c.Unifi.GetJSON(apiPath)
		if err != nil {
			// Return what we have so far along with the error.
			return allJSON, fmt.Errorf("controller: %w", err)
		}

		allJSON = append(allJSON, body...)
	}

	return allJSON, nil
}
|
||||
|
|
@ -0,0 +1,214 @@
|
|||
package inputunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
/* This code reformats our data to be displayed on the built-in web interface. */
|
||||
|
||||
func updateWeb(c *Controller, metrics *Metrics) {
|
||||
webserver.UpdateInput(&webserver.Input{
|
||||
Name: PluginName, // Forgetting this leads to 3 hours of head scratching.
|
||||
Sites: formatSites(c, metrics.Sites),
|
||||
Clients: formatClients(c, metrics.Clients),
|
||||
Devices: formatDevices(c, metrics.Devices),
|
||||
})
|
||||
}
|
||||
|
||||
func formatConfig(config *Config) *Config {
|
||||
return &Config{
|
||||
Default: *formatControllers([]*Controller{&config.Default})[0],
|
||||
Disable: config.Disable,
|
||||
Dynamic: config.Dynamic,
|
||||
Controllers: formatControllers(config.Controllers),
|
||||
}
|
||||
}
|
||||
|
||||
func formatControllers(controllers []*Controller) []*Controller {
|
||||
fixed := []*Controller{}
|
||||
|
||||
for _, c := range controllers {
|
||||
id := ""
|
||||
if c.Unifi != nil {
|
||||
id = c.Unifi.UUID
|
||||
}
|
||||
|
||||
fixed = append(fixed, &Controller{
|
||||
VerifySSL: c.VerifySSL,
|
||||
SaveAnomal: c.SaveAnomal,
|
||||
SaveAlarms: c.SaveAlarms,
|
||||
SaveRogue: c.SaveRogue,
|
||||
SaveEvents: c.SaveEvents,
|
||||
SaveIDS: c.SaveIDS,
|
||||
SaveDPI: c.SaveDPI,
|
||||
HashPII: c.HashPII,
|
||||
SaveSites: c.SaveSites,
|
||||
User: c.User,
|
||||
Pass: strconv.FormatBool(c.Pass != ""),
|
||||
URL: c.URL,
|
||||
Sites: c.Sites,
|
||||
ID: id,
|
||||
})
|
||||
}
|
||||
|
||||
return fixed
|
||||
}
|
||||
|
||||
func formatSites(c *Controller, sites []*unifi.Site) (s webserver.Sites) {
|
||||
for _, site := range sites {
|
||||
s = append(s, &webserver.Site{
|
||||
ID: site.ID,
|
||||
Name: site.Name,
|
||||
Desc: site.Desc,
|
||||
Source: site.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
})
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func formatClients(c *Controller, clients []*unifi.Client) (d webserver.Clients) {
|
||||
for _, client := range clients {
|
||||
clientType, deviceMAC := "unknown", "unknown"
|
||||
if client.ApMac != "" {
|
||||
clientType = "wireless"
|
||||
deviceMAC = client.ApMac
|
||||
} else if client.SwMac != "" {
|
||||
clientType = "wired"
|
||||
deviceMAC = client.SwMac
|
||||
}
|
||||
|
||||
if deviceMAC == "" {
|
||||
deviceMAC = client.GwMac
|
||||
}
|
||||
|
||||
d = append(d, &webserver.Client{
|
||||
Name: client.Name,
|
||||
SiteID: client.SiteID,
|
||||
Source: client.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
MAC: client.Mac,
|
||||
IP: client.IP,
|
||||
Type: clientType,
|
||||
DeviceMAC: deviceMAC,
|
||||
Rx: client.RxBytes,
|
||||
Tx: client.TxBytes,
|
||||
Since: time.Unix(client.FirstSeen, 0),
|
||||
Last: time.Unix(client.LastSeen, 0),
|
||||
})
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func formatDevices(c *Controller, devices *unifi.Devices) (d webserver.Devices) { // nolint: funlen
|
||||
if devices == nil {
|
||||
return d
|
||||
}
|
||||
|
||||
for _, device := range devices.UAPs {
|
||||
d = append(d, &webserver.Device{
|
||||
Name: device.Name,
|
||||
SiteID: device.SiteID,
|
||||
Source: device.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
MAC: device.Mac,
|
||||
IP: device.IP,
|
||||
Type: device.Type,
|
||||
Model: device.Model,
|
||||
Version: device.Version,
|
||||
Uptime: int(device.Uptime.Val),
|
||||
Clients: int(device.NumSta.Val),
|
||||
Config: nil,
|
||||
})
|
||||
}
|
||||
|
||||
for _, device := range devices.UDMs {
|
||||
d = append(d, &webserver.Device{
|
||||
Name: device.Name,
|
||||
SiteID: device.SiteID,
|
||||
Source: device.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
MAC: device.Mac,
|
||||
IP: device.IP,
|
||||
Type: device.Type,
|
||||
Model: device.Model,
|
||||
Version: device.Version,
|
||||
Uptime: int(device.Uptime.Val),
|
||||
Clients: int(device.NumSta.Val),
|
||||
Config: nil,
|
||||
})
|
||||
}
|
||||
|
||||
for _, device := range devices.USWs {
|
||||
d = append(d, &webserver.Device{
|
||||
Name: device.Name,
|
||||
SiteID: device.SiteID,
|
||||
Source: device.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
MAC: device.Mac,
|
||||
IP: device.IP,
|
||||
Type: device.Type,
|
||||
Model: device.Model,
|
||||
Version: device.Version,
|
||||
Uptime: int(device.Uptime.Val),
|
||||
Clients: int(device.NumSta.Val),
|
||||
Config: nil,
|
||||
})
|
||||
}
|
||||
|
||||
for _, device := range devices.USGs {
|
||||
d = append(d, &webserver.Device{
|
||||
Name: device.Name,
|
||||
SiteID: device.SiteID,
|
||||
Source: device.SourceName,
|
||||
Controller: c.Unifi.UUID,
|
||||
MAC: device.Mac,
|
||||
IP: device.IP,
|
||||
Type: device.Type,
|
||||
Model: device.Model,
|
||||
Version: device.Version,
|
||||
Uptime: int(device.Uptime.Val),
|
||||
Clients: int(device.NumSta.Val),
|
||||
Config: nil,
|
||||
})
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// Logf logs a message.
|
||||
func (u *InputUnifi) Logf(msg string, v ...interface{}) {
|
||||
webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "info"},
|
||||
})
|
||||
u.Logger.Logf(msg, v...)
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (u *InputUnifi) LogErrorf(msg string, v ...interface{}) {
|
||||
webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "error"},
|
||||
})
|
||||
u.Logger.LogErrorf(msg, v...)
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (u *InputUnifi) LogDebugf(msg string, v ...interface{}) {
|
||||
webserver.NewInputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "debug"},
|
||||
})
|
||||
u.Logger.LogDebugf(msg, v...)
|
||||
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2020-2021 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
# lokiunifi
|
||||
|
||||
Loki Output Plugin for UnPoller
|
||||
|
||||
This plugin writes UniFi Events, IDS, Alarm, and Anomaly data to Loki.
|
||||
|
||||
Example Config:
|
||||
|
||||
```toml
|
||||
[loki]
|
||||
# URL is the only required setting for Loki.
|
||||
url = "http://192.168.3.2:3100"
|
||||
|
||||
# How often to poll UniFi and report to Loki.
|
||||
interval = "2m"
|
||||
|
||||
# How long to wait for Loki responses.
|
||||
timeout = "5s"
|
||||
|
||||
# Set these to use basic auth.
|
||||
#user = ""
|
||||
#pass = ""
|
||||
|
||||
# Used for auth-less multi-tenant.
|
||||
#tenant_id = ""
|
||||
```
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// lokiPushPath is Loki's HTTP push endpoint; Post appends it to the base URL.
	lokiPushPath = "/loki/api/v1/push"
)

// errStatusCode is returned by Post when Loki replies with any status
// other than 204 No Content.
var errStatusCode = fmt.Errorf("unexpected HTTP status code")

// Client holds the http client for contacting Loki.
type Client struct {
	*Config      // plugin settings: URL, credentials, tenant ID.
	*http.Client // configured by Loki.httpClient with timeout and TLS settings.
}
|
||||
|
||||
func (l *Loki) httpClient() *Client {
|
||||
return &Client{
|
||||
Config: l.Config,
|
||||
Client: &http.Client{
|
||||
Timeout: l.Timeout.Duration,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: !l.VerifySSL, // nolint: gosec
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Post marshals and posts a batch of log messages.
|
||||
func (c *Client) Post(logs interface{}) error {
|
||||
msg, err := json.Marshal(logs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("json marshal: %w", err)
|
||||
}
|
||||
|
||||
u := strings.TrimSuffix(c.URL, lokiPushPath) + lokiPushPath
|
||||
|
||||
req, err := c.NewRequest(u, "POST", "application/json", msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if code, body, err := c.Do(req); err != nil {
|
||||
return err
|
||||
} else if code != http.StatusNoContent {
|
||||
m := fmt.Sprintf("%s (%d/%s) %s, msg: %s", u, code, http.StatusText(code),
|
||||
strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " ")), msg)
|
||||
|
||||
return fmt.Errorf("%s: %w", m, errStatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewRequest creates the http request based on input data.
|
||||
func (c *Client) NewRequest(url, method, cType string, msg []byte) (*http.Request, error) {
|
||||
req, err := http.NewRequest(method, url, bytes.NewBuffer(msg)) //nolint:noctx
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating request: %w", err)
|
||||
}
|
||||
|
||||
if cType != "" {
|
||||
req.Header.Set("Content-Type", cType)
|
||||
}
|
||||
|
||||
if c.Username != "" || c.Password != "" {
|
||||
req.SetBasicAuth(c.Username, c.Password)
|
||||
}
|
||||
|
||||
if c.TenantID != "" {
|
||||
req.Header.Set("X-Scope-OrgID", c.TenantID)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Do makes an http request and returns the status code, body and/or an error.
|
||||
func (c *Client) Do(req *http.Request) (int, []byte, error) {
|
||||
resp, err := c.Client.Do(req)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("making request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return resp.StatusCode, body, fmt.Errorf("reading body: %w", err)
|
||||
}
|
||||
|
||||
return resp.StatusCode, body, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
// Logf logs a message.
|
||||
func (l *Loki) Logf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "info"},
|
||||
})
|
||||
l.Collect.Logf(msg, v...)
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (l *Loki) LogErrorf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "error"},
|
||||
})
|
||||
l.Collect.LogErrorf(msg, v...)
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (l *Loki) LogDebugf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "debug"},
|
||||
})
|
||||
l.Collect.LogDebugf(msg, v...)
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
"golift.io/cnfg"
|
||||
)
|
||||
|
||||
// Interval bounds and registration defaults.
// ValidateConfig clamps the configured interval into [minInterval, maxInterval];
// init() applies defaultInterval/defaultTimeout before config unmarshalling.
const (
	maxInterval = 10 * time.Minute
	minInterval = 10 * time.Second
	defaultTimeout = 10 * time.Second
	defaultInterval = 2 * time.Minute
)

const (
	// InputName is the name of plugin that gives us data.
	InputName = "unifi"
	// PluginName is the name of this plugin.
	PluginName = "loki"
)

// Config is the plugin's input data.
// URL is the only required field; everything else has a usable default
// or is optional (auth, tenant).
type Config struct {
	Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
	VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
	URL string `json:"url" toml:"url" xml:"url" yaml:"url"`
	Username string `json:"user" toml:"user" xml:"user" yaml:"user"`
	Password string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
	TenantID string `json:"tenant_id" toml:"tenant_id" xml:"tenant_id" yaml:"tenant_id"`
	Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
	Timeout cnfg.Duration `json:"timeout" toml:"timeout" xml:"timeout" yaml:"timeout"`
}

// Loki is the main library struct. This satisfies the poller.Output interface.
type Loki struct {
	Collect poller.Collect
	*Config `json:"loki" toml:"loki" xml:"loki" yaml:"loki"`
	client *Client // created by ValidateConfig.
	last time.Time // end of the previous successful shipment window.
}
|
||||
|
||||
// init is how this modular code is initialized by the main app.
// This module adds itself as an output module to the poller core.
func init() { // nolint: gochecknoinits
	// Pre-seed the defaults; config unmarshalling overwrites them later.
	l := &Loki{Config: &Config{
		Interval: cnfg.Duration{Duration: defaultInterval},
		Timeout: cnfg.Duration{Duration: defaultTimeout},
	}}

	// Register with the poller core; Run is invoked once config is loaded.
	poller.NewOutput(&poller.Output{
		Name: PluginName,
		Config: l,
		Method: l.Run,
	})
}
|
||||
|
||||
// Run is fired from the poller library after the Config is unmarshalled.
|
||||
func (l *Loki) Run(collect poller.Collect) error {
|
||||
if l.Collect = collect; l.Config == nil || l.URL == "" || l.Disable {
|
||||
l.Logf("Loki config missing (or disabled), Loki output disabled!")
|
||||
return nil
|
||||
}
|
||||
|
||||
l.ValidateConfig()
|
||||
|
||||
fake := *l.Config
|
||||
fake.Password = strconv.FormatBool(fake.Password != "")
|
||||
|
||||
webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: fake})
|
||||
l.PollController()
|
||||
l.LogErrorf("Loki Output Plugin Stopped!")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConfig sets initial "last" update time. Also creates an http client,
|
||||
// makes sure URL is sane, and sets interval within min/max limits.
|
||||
func (l *Loki) ValidateConfig() {
|
||||
if l.Interval.Duration > maxInterval {
|
||||
l.Interval.Duration = maxInterval
|
||||
} else if l.Interval.Duration < minInterval {
|
||||
l.Interval.Duration = minInterval
|
||||
}
|
||||
|
||||
if strings.HasPrefix(l.Password, "file://") {
|
||||
pass, err := os.ReadFile(strings.TrimPrefix(l.Password, "file://"))
|
||||
if err != nil {
|
||||
l.LogErrorf("Reading Loki Password File: %v", err)
|
||||
}
|
||||
|
||||
l.Password = strings.TrimSpace(string(pass))
|
||||
}
|
||||
|
||||
l.last = time.Now().Add(-l.Interval.Duration)
|
||||
l.client = l.httpClient()
|
||||
l.URL = strings.TrimRight(l.URL, "/") // gets a path appended to it later.
|
||||
}
|
||||
|
||||
// PollController runs forever, polling UniFi for events and pushing them to Loki.
|
||||
// This is started by Run().
|
||||
func (l *Loki) PollController() {
|
||||
interval := l.Interval.Round(time.Second)
|
||||
l.Logf("Loki Event collection started, interval: %v, URL: %s", interval, l.URL)
|
||||
|
||||
ticker := time.NewTicker(interval)
|
||||
for start := range ticker.C {
|
||||
events, err := l.Collect.Events(&poller.Filter{Name: InputName})
|
||||
if err != nil {
|
||||
l.LogErrorf("event fetch for Loki failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = l.ProcessEvents(l.NewReport(start), events)
|
||||
if err != nil {
|
||||
l.LogErrorf("%v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessEvents offloads some of the loop from PollController.
|
||||
func (l *Loki) ProcessEvents(report *Report, events *poller.Events) error {
|
||||
// Sometimes it gets stuck on old messages. This gets it past that.
|
||||
if time.Since(l.last) > 4*l.Interval.Duration {
|
||||
l.last = time.Now().Add(-4 * l.Interval.Duration)
|
||||
}
|
||||
|
||||
logs := report.ProcessEventLogs(events)
|
||||
if err := l.client.Post(logs); err != nil {
|
||||
return fmt.Errorf("sending to Loki failed: %w", err)
|
||||
}
|
||||
|
||||
l.last = report.Start
|
||||
l.Logf("Events sent to Loki. %v", report)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
// LogStream contains a stream of logs (like a log file).
// This app uses one stream per log entry because each log may have different labels.
type LogStream struct {
	Labels map[string]string `json:"stream"` // "the file name"
	Entries [][]string `json:"values"` // "the log lines"
}

// Logs is the main logs-holding structure. This is the Loki-output format.
type Logs struct {
	Streams []LogStream `json:"streams"` // "multiple files"
}

// Report is the temporary data generated by processing events.
type Report struct {
	Start time.Time // when this polling cycle began.
	Oldest time.Time // events dated before this were already shipped; skip them.
	poller.Logger
	Counts map[string]int // per-event-type tallies (typeEvent, typeIDS, ...).
}
|
||||
|
||||
// NewReport makes a new report.
|
||||
func (l *Loki) NewReport(start time.Time) *Report {
|
||||
return &Report{
|
||||
Start: start,
|
||||
Oldest: l.last,
|
||||
Logger: l,
|
||||
Counts: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessEventLogs loops the event Logs, matches the interface type, calls the
|
||||
// appropriate method for the data, and compiles the Logs into a Loki format.
|
||||
// This runs once per interval, if there was no collection error.
|
||||
func (r *Report) ProcessEventLogs(events *poller.Events) *Logs {
|
||||
logs := &Logs{}
|
||||
|
||||
for _, e := range events.Logs {
|
||||
switch event := e.(type) {
|
||||
case *unifi.IDS:
|
||||
r.IDS(event, logs)
|
||||
case *unifi.Event:
|
||||
r.Event(event, logs)
|
||||
case *unifi.Alarm:
|
||||
r.Alarm(event, logs)
|
||||
case *unifi.Anomaly:
|
||||
r.Anomaly(event, logs)
|
||||
default: // unlikely.
|
||||
r.LogErrorf("unknown event type: %T", e)
|
||||
}
|
||||
}
|
||||
|
||||
return logs
|
||||
}
|
||||
|
||||
func (r *Report) String() string {
|
||||
return fmt.Sprintf("%s: %d, %s: %d, %s: %d, %s: %d, Dur: %v",
|
||||
typeEvent, r.Counts[typeEvent], typeIDS, r.Counts[typeIDS],
|
||||
typeAlarm, r.Counts[typeAlarm], typeAnomaly, r.Counts[typeAnomaly],
|
||||
time.Since(r.Start).Round(time.Millisecond))
|
||||
}
|
||||
|
||||
// CleanLabels removes any tag that is empty. The map is modified in place
// and also returned for convenience.
func CleanLabels(labels map[string]string) map[string]string {
	for key, value := range labels {
		if strings.TrimSpace(value) == "" {
			delete(labels, key)
		}
	}

	return labels
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
const typeAlarm = "Alarm"

// Alarm stores a structured Alarm for batch sending to Loki.
// Alarms older than the report's Oldest watermark were already shipped and
// are skipped; each alarm becomes its own stream since labels vary per event.
func (r *Report) Alarm(event *unifi.Alarm, logs *Logs) {
	if event.Datetime.Before(r.Oldest) {
		return
	}

	r.Counts[typeAlarm]++ // increase counter and append new log line.

	logs.Streams = append(logs.Streams, LogStream{
		// One entry: [nanosecond timestamp, message] per Loki's push format.
		Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
		Labels: CleanLabels(map[string]string{
			"application":  "unifi_alarm",
			"source":       event.SourceName,
			"site_name":    event.SiteName,
			"subsystem":    event.Subsystem,
			"category":     event.Catname,
			"event_type":   event.EventType,
			"key":          event.Key,
			"app_protocol": event.AppProto,
			"protocol":     event.Proto,
			"interface":    event.InIface,
			"src_country":  event.SrcIPCountry,
			"usgip":        event.USGIP,
			"action":       event.InnerAlertAction,
		}),
	})
}
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
const typeAnomaly = "Anomaly"

// Anomaly stores a structured Anomaly for batch sending to Loki.
// Anomalies older than the report's Oldest watermark were already shipped
// and are skipped.
func (r *Report) Anomaly(event *unifi.Anomaly, logs *Logs) {
	if event.Datetime.Before(r.Oldest) {
		return
	}

	r.Counts[typeAnomaly]++ // increase counter and append new log line.

	logs.Streams = append(logs.Streams, LogStream{
		// One entry: [nanosecond timestamp, anomaly text] per Loki's push format.
		Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Anomaly}},
		Labels: CleanLabels(map[string]string{
			"application": "unifi_anomaly",
			"source":      event.SourceName,
			"site_name":   event.SiteName,
			"device_mac":  event.DeviceMAC,
		}),
	})
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
const typeEvent = "Event"

// Event stores a structured UniFi Event for batch sending to Loki.
// Events older than the report's Oldest watermark were already shipped and
// are skipped. Empty labels are stripped by CleanLabels so only the fields
// relevant to each event type survive.
func (r *Report) Event(event *unifi.Event, logs *Logs) {
	if event.Datetime.Before(r.Oldest) {
		return
	}

	r.Counts[typeEvent]++ // increase counter and append new log line.

	logs.Streams = append(logs.Streams, LogStream{
		// One entry: [nanosecond timestamp, message] per Loki's push format.
		Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
		Labels: CleanLabels(map[string]string{
			"application":  "unifi_event",
			"admin":        event.Admin, // username
			"site_name":    event.SiteName,
			"source":       event.SourceName,
			"subsystem":    event.Subsystem,
			"ap_from":      event.ApFrom,
			"ap_to":        event.ApTo,
			"ap":           event.Ap,
			"ap_name":      event.ApName,
			"gw":           event.Gw,
			"gw_name":      event.GwName,
			"sw":           event.Sw,
			"sw_name":      event.SwName,
			"category":     event.Catname,
			"radio":        event.Radio,
			"radio_from":   event.RadioFrom,
			"radio_to":     event.RadioTo,
			"key":          event.Key,
			"interface":    event.InIface,
			"event_type":   event.EventType,
			"ssid":         event.SSID,
			"channel":      event.Channel.Txt,
			"channel_from": event.ChannelFrom.Txt,
			"channel_to":   event.ChannelTo.Txt,
			"usgip":        event.USGIP,
			"network":      event.Network,
			"app_protocol": event.AppProto,
			"protocol":     event.Proto,
			"action":       event.InnerAlertAction,
			"src_country":  event.SrcIPCountry,
		}),
	})
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
package lokiunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
const typeIDS = "IDS"

// IDS stores a structured IDS event for batch sending to Loki.
// IDS events older than the report's Oldest watermark were already shipped
// and are skipped.
func (r *Report) IDS(event *unifi.IDS, logs *Logs) {
	if event.Datetime.Before(r.Oldest) {
		return
	}

	r.Counts[typeIDS]++ // increase counter and append new log line.

	logs.Streams = append(logs.Streams, LogStream{
		// One entry: [nanosecond timestamp, message] per Loki's push format.
		Entries: [][]string{{strconv.FormatInt(event.Datetime.UnixNano(), 10), event.Msg}},
		Labels: CleanLabels(map[string]string{
			"application":  "unifi_ids",
			"source":       event.SourceName,
			"site_name":    event.SiteName,
			"subsystem":    event.Subsystem,
			"category":     event.Catname,
			"event_type":   event.EventType,
			"key":          event.Key,
			"app_protocol": event.AppProto,
			"protocol":     event.Proto,
			"interface":    event.InIface,
			"src_country":  event.SrcIPCountry,
			"usgip":        event.USGIP,
			"action":       event.InnerAlertAction,
		}),
	})
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
*.so
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT LICENSE.
|
||||
Copyright (c) 2018-2020 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
# MYSQL Output Plugin Example
|
||||
|
||||
This plugin is not finished and did not get finished for the release of poller v2.
|
||||
Sorry about that. I'll try to get it working soon! 2/4/20
|
||||
|
||||
The code here, and the dynamic plugin provided shows an example of how you can
|
||||
write your own output for unifi-poller. This plugin records some very basic
|
||||
data about clients on a unifi network into a mysql database.
|
||||
|
||||
You could write outputs that do... anything. An example: They could compare current
|
||||
connected clients to a previous list (in a db, or stored in memory), and send a
|
||||
notification if it changes. The possibilities are endless.
|
||||
|
||||
You must compile your plugin using the unifi-poller source for the version you're
|
||||
using. In other words, to build a plugin for version 2.0.1, do this:
|
||||
|
||||
```bash
|
||||
mkdir -p $GOPATH/src/github.com/unifi-poller
|
||||
cd $GOPATH/src/github.com/unifi-poller
|
||||
|
||||
git clone git@github.com:unifi-poller/unifi-poller.git
|
||||
cd unifi-poller
|
||||
|
||||
git checkout v2.0.1
|
||||
|
||||
cp -r <your plugin> plugins/
|
||||
GOOS=linux make plugins
|
||||
```
|
||||
|
||||
The plugin you copy in *must* have a `main.go` file for `make plugins` to build it.
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
package main
|
||||
|
||||
/* Everything in this file runs after the config is unmarshalled and we've
verified the configuration for the poller. */

// runCollector is the plugin's main loop. It is a stub: the mysql output
// was never completed, so it logs a notice and returns immediately.
func (p *plugin) runCollector() error {
	p.Logf("mysql plugin is not finished")
	return nil
}
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"golift.io/cnfg"
|
||||
)
|
||||
|
||||
// Only capital (exported) members are unmarshaled when passed into poller.NewOutput().
type plugin struct {
	*Config `json:"mysql" toml:"mysql" xml:"mysql" yaml:"mysql"`
	poller.Collect // poller core interface, set by Run.
}

// Config represents the data that is unmarshalled from the up.conf config file for this plugins.
// See up.conf.example.mysql for sample input data.
type Config struct {
	Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
	Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
	Host string `json:"host" toml:"host" xml:"host" yaml:"host"`
	User string `json:"user" toml:"user" xml:"user" yaml:"user"`
	Pass string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
	DB string `json:"db" toml:"db" xml:"db" yaml:"db"`
	Devices []Device `json:"devices" toml:"devices" xml:"device" yaml:"devices"`
	Clients *Clients `json:"clients" toml:"clients" xml:"clients" yaml:"clients"`
}

// Device represents the configuration to save a devices' data.
// Type is one of uap, usw, ugw, udm.
// Table represents the mysql table name we save these fields to.
// Fields is a map of api response data key -> mysql column.
type Device struct {
	Type string `json:"type" toml:"type" xml:"type" yaml:"type"`
	Table string `json:"table" toml:"table" xml:"table" yaml:"table"`
	Fields map[string]string `json:"fields" toml:"fields" xml:"field" yaml:"fields"`
}

// Clients represents the configuration to save clients' data.
// Table represents the mysql table name we save these fields to.
// Fields is a map of api response data key -> mysql column.
type Clients struct {
	Table string `json:"table" toml:"table" xml:"table" yaml:"table"`
	Fields map[string]string `json:"fields" toml:"fields" xml:"field" yaml:"fields"`
}
|
||||
|
||||
// init registers this output with the poller core when the plugin is loaded.
func init() {
	u := &plugin{Config: &Config{}}

	poller.NewOutput(&poller.Output{
		Name: "mysql",
		Config: u, // pass in the struct *above* your config (so it can see the struct tags).
		Method: u.Run,
	})
}
|
||||
|
||||
// Run gets called by poller core code. Return when the plugin stops working or has an error.
|
||||
// In other words, don't run your code in a go routine, it already is.
|
||||
func (p *plugin) Run(c poller.Collect) error {
|
||||
if p.Collect = c; c == nil || p.Config == nil || p.Disable {
|
||||
return nil // no config or disabled, bail out.
|
||||
}
|
||||
|
||||
if err := p.validateConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.runCollector()
|
||||
}
|
||||
|
||||
// validateConfig checks input sanity.
|
||||
func (p *plugin) validateConfig() error {
|
||||
if p.Interval.Duration == 0 {
|
||||
return fmt.Errorf("must provide a polling interval")
|
||||
}
|
||||
|
||||
if p.Clients == nil && len(p.Devices) == 0 {
|
||||
return fmt.Errorf("must configure client or device collection; both empty")
|
||||
}
|
||||
|
||||
for _, d := range p.Devices {
|
||||
if len(d.Fields) == 0 {
|
||||
return fmt.Errorf("no fields defined for device type %s, table %s", d.Type, d.Table)
|
||||
}
|
||||
}
|
||||
|
||||
if p.Clients != nil && p.Clients.Fields == nil {
|
||||
return fmt.Errorf("no fields defined for clients; if you don't want to store client data, remove it from the config")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// main() is required, but it shouldn't do much as it's not used in plugin mode.
// It only prints a notice if someone runs the binary directly.
func main() {
	fmt.Println("this is a unifi-poller plugin; not an application")
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
[poller]
|
||||
debug = true
|
||||
plugins = ["/path/to/mysql.so"]
|
||||
|
||||
[mysql]
|
||||
interval = "30s"
|
||||
host = "127.0.0.1:3306"
|
||||
user = "unifipoller"
|
||||
pass = "unifipoller"
|
||||
db = "unifipoller"
|
||||
|
||||
[mysql.clients]
|
||||
table = "client_records"
|
||||
[mysql.clients.fields]
|
||||
tx_bytes = "tx-bytes"
|
||||
rx_bytes = "rx-bytes"
|
||||
|
||||
[[mysql.devices]]
|
||||
type = "uap"
|
||||
table = "uap_records"
|
||||
[mysql.devices.fields]
|
||||
tx_bytes = "tx-bytes"
|
||||
rx_bytes = "rx-bytes"
|
||||
|
||||
[[mysql.devices]]
|
||||
type = "ugw"
|
||||
table = "usg_records"
|
||||
[mysql.devices.fields]
|
||||
wan_bytes = "wan_bytes"
|
||||
|
||||
[unifi.defaults]
|
||||
url = "https://127.0.0.1:8443"
|
||||
user = "unifipoller"
|
||||
pass = "4BB9345C-2341-48D7-99F5-E01B583FF77F"
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT LICENSE.
|
||||
Copyright (c) 2018-2020 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
# poller
|
||||
|
||||
## UniFi Poller Core
|
||||
|
||||
This module ties the inputs together with the outputs.
|
||||
|
||||
Aggregates metrics on request. Provides CLI app and args parsing.
|
||||
|
||||
## Ideal
|
||||
|
||||
This library has no notion of "UniFi" or controllers, or Influx, or Prometheus.
|
||||
This library simply provides an input interface and an output interface.
|
||||
Each interface uses an `[]interface{}` type, so any type of data can be used.
|
||||
That is to say, you could write input and output plugins that work with, say,
|
||||
Cisco gear, or any other network (or even non-network) data. The existing plugins
|
||||
should provide ample example of how to use this library, but at some point the
|
||||
godoc will improve.
|
||||
|
||||
## Features
|
||||
|
||||
- Automatically unmarshals plugin config structs from config file and/or env variables.
|
||||
- Initializes all "imported" plugins on startup.
|
||||
- Provides input plugins a Logger, requires an interface for Metrics and Events retrieval.
|
||||
- Provides Output plugins an interface to retrieve Metrics and Events, and a Logger.
|
||||
- Provides automatic aggregation of Metrics and Events from multiple sources.
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
package poller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
// PrintRawMetrics prints raw json from the UniFi Controller. This is currently
|
||||
// tied into the -j CLI arg, and is probably not very useful outside that context.
|
||||
func (u *UnifiPoller) PrintRawMetrics() (err error) {
|
||||
split := strings.SplitN(u.Flags.DumpJSON, " ", 2)
|
||||
filter := &Filter{Kind: split[0]}
|
||||
|
||||
// Allows you to grab a controller other than 0 from config.
|
||||
if split2 := strings.Split(filter.Kind, ":"); len(split2) > 1 {
|
||||
filter.Kind = split2[0]
|
||||
filter.Unit, _ = strconv.Atoi(split2[1])
|
||||
}
|
||||
|
||||
// Used with "other"
|
||||
if len(split) > 1 {
|
||||
filter.Path = split[1]
|
||||
}
|
||||
|
||||
// As of now we only have one input plugin, so target that [0].
|
||||
m, err := inputs[0].RawMetrics(filter)
|
||||
fmt.Println(string(m))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// PrintPasswordHash prints a bcrypt'd password. Useful for the web server.
|
||||
func (u *UnifiPoller) PrintPasswordHash() (err error) {
|
||||
pwd := []byte(u.Flags.HashPW)
|
||||
|
||||
if u.Flags.HashPW == "-" {
|
||||
fmt.Print("Enter Password: ")
|
||||
|
||||
pwd, err = term.ReadPassword(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading stdin: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println() // print a newline.
|
||||
}
|
||||
|
||||
hash, err := bcrypt.GenerateFromPassword(pwd, bcrypt.MinCost)
|
||||
fmt.Println(string(hash))
|
||||
|
||||
return err //nolint:wrapcheck
|
||||
}
|
||||
|
|
@ -0,0 +1,203 @@
|
|||
package poller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"plugin"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golift.io/cnfg"
|
||||
"golift.io/cnfgfile"
|
||||
)
|
||||
|
||||
// Application-wide identifiers used for flag parsing and env-var config.
const (
	// AppName is the name of the application.
	AppName = "unpoller"
	// ENVConfigPrefix is the prefix appended to an env variable tag name.
	ENVConfigPrefix = "UP"
)
|
||||
|
||||
// DefaultConfFile is where to find config if --config is not provided.
// Multiple candidate paths are comma-separated; the first existing one wins.
func DefaultConfFile() string {
	switch runtime.GOOS {
	case "windows":
		return `C:\ProgramData\unifi-poller\up.conf`
	case "darwin", "freebsd", "netbsd", "openbsd":
		return "/etc/unifi-poller/up.conf,/usr/local/etc/unifi-poller/up.conf"
	default:
		// linux and everything else
		return "/config/unifi-poller.conf,/etc/unifi-poller/up.conf"
	}
}
|
||||
|
||||
// DefaultObjPath is the path to look for shared object libraries (plugins).
func DefaultObjPath() string {
	switch runtime.GOOS {
	case "windows":
		// DefaultObjPath is useless in this context. Bummer.
		return "PLUGINS_DO_NOT_WORK_ON_WINDOWS_SOWWWWWY"
	case "darwin", "freebsd", "netbsd", "openbsd":
		return "/usr/local/lib/unifi-poller"
	default:
		// linux and everything else
		return "/usr/lib/unifi-poller"
	}
}
|
||||
|
||||
// UnifiPoller contains the application startup data, and auth info for UniFi & Influx.
type UnifiPoller struct {
	Flags *Flags // CLI flag values, parsed in Start().
	*Config
}

// Flags represents the CLI args available and their settings.
type Flags struct {
	ConfigFile string // --config: comma-separated candidate config paths.
	DumpJSON   string // --dumpjson: when non-empty, dump raw JSON and exit.
	HashPW     string // --encrypt: bcrypt this string and exit ("-" prompts).
	ShowVer    bool   // --version: print version and exit.
	*pflag.FlagSet
}

// Metrics is a type shared by the exporting and reporting packages.
// The element slices are opaque to the core; input and output plugins
// agree on the concrete types they carry.
type Metrics struct {
	TS         time.Time // Timestamp of the poll that produced these metrics.
	Sites      []interface{}
	Clients    []interface{}
	SitesDPI   []interface{}
	ClientsDPI []interface{}
	Devices    []interface{}
	RogueAPs   []interface{}
}

// Events defines the type for log entries.
type Events struct {
	Logs []interface{}
}

// Config represents the core library input data.
type Config struct {
	*Poller `json:"poller" toml:"poller" xml:"poller" yaml:"poller"`
}

// Poller is the global config values.
type Poller struct {
	Plugins []string `json:"plugins" toml:"plugins" xml:"plugin" yaml:"plugins"`
	Debug   bool     `json:"debug" toml:"debug" xml:"debug,attr" yaml:"debug"`
	Quiet   bool     `json:"quiet" toml:"quiet" xml:"quiet,attr" yaml:"quiet"`
}
|
||||
|
||||
// LoadPlugins reads-in dynamic shared libraries.
|
||||
// Not used very often, if at all.
|
||||
func (u *UnifiPoller) LoadPlugins() error {
|
||||
for _, p := range u.Plugins {
|
||||
name := strings.TrimSuffix(p, ".so") + ".so"
|
||||
|
||||
if name == ".so" {
|
||||
continue // Just ignore it. uhg.
|
||||
}
|
||||
|
||||
if _, err := os.Stat(name); os.IsNotExist(err) {
|
||||
name = path.Join(DefaultObjPath(), name)
|
||||
}
|
||||
|
||||
u.Logf("Loading Dynamic Plugin: %s", name)
|
||||
|
||||
if _, err := plugin.Open(name); err != nil {
|
||||
return fmt.Errorf("opening plugin: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseConfigs parses the poller config and the config for each registered output plugin.
|
||||
func (u *UnifiPoller) ParseConfigs() error {
|
||||
// Parse core config.
|
||||
if err := u.parseInterface(u.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load dynamic plugins.
|
||||
if err := u.LoadPlugins(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := u.parseInputs(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return u.parseOutputs()
|
||||
}
|
||||
|
||||
// getFirstFile returns the first file that exists and is "reachable".
|
||||
func getFirstFile(files []string) (string, error) {
|
||||
var err error
|
||||
|
||||
for _, f := range files {
|
||||
if _, err = os.Stat(f); err == nil {
|
||||
return f, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("finding file: %w", err)
|
||||
}
|
||||
|
||||
// parseInterface parses the config file and environment variables into the provided interface.
|
||||
func (u *UnifiPoller) parseInterface(i interface{}) error {
|
||||
// Parse config file into provided interface.
|
||||
if err := cnfgfile.Unmarshal(i, u.Flags.ConfigFile); err != nil {
|
||||
return fmt.Errorf("cnfg unmarshal: %w", err)
|
||||
}
|
||||
|
||||
// Parse environment variables into provided interface.
|
||||
if _, err := cnfg.UnmarshalENV(i, ENVConfigPrefix); err != nil {
|
||||
return fmt.Errorf("env unmarshal: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse input plugin configs.
|
||||
func (u *UnifiPoller) parseInputs() error {
|
||||
inputSync.Lock()
|
||||
defer inputSync.Unlock()
|
||||
|
||||
for _, i := range inputs {
|
||||
if err := u.parseInterface(i.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse output plugin configs.
|
||||
func (u *UnifiPoller) parseOutputs() error {
|
||||
outputSync.Lock()
|
||||
defer outputSync.Unlock()
|
||||
|
||||
for _, o := range outputs {
|
||||
if err := u.parseInterface(o.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,156 @@
|
|||
package poller
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// These are used to keep track of loaded input plugins.
	// inputSync guards inputs; registration locks it, readers RLock it.
	inputs    []*InputPlugin // nolint: gochecknoglobals
	inputSync sync.RWMutex   // nolint: gochecknoglobals
)
|
||||
|
||||
// Input plugins must implement this interface.
type Input interface {
	Initialize(Logger) error           // Called once on startup to initialize the plugin.
	Metrics(*Filter) (*Metrics, error) // Called every time new metrics are requested.
	Events(*Filter) (*Events, error)   // This is new.
	RawMetrics(*Filter) ([]byte, error)
}

// InputPlugin describes an input plugin's consumable interface.
type InputPlugin struct {
	Name   string
	Config interface{} // Each config is passed into an unmarshaller later.
	Input
}

// Filter is used for metrics filters. Many fields for lots of expansion.
// Only a few are used by the core today; the rest are reserved for plugins.
type Filter struct {
	Type string
	Term string
	Name string // Matched (case-insensitively) against InputPlugin.Name.
	Role string
	Kind string // Used by the -j dump mode to pick a payload type.
	Path string // Used by the -j dump mode as a raw API path ("other").
	Text string
	Unit int // Controller index selected via "kind:unit" in -j mode.
	Pass bool
	Skip bool
	Time time.Time
	Dur  time.Duration
}
|
||||
|
||||
// NewInput creates a metric input. This should be called by input plugins
|
||||
// init() functions.
|
||||
func NewInput(i *InputPlugin) {
|
||||
inputSync.Lock()
|
||||
defer inputSync.Unlock()
|
||||
|
||||
if i == nil || i.Input == nil {
|
||||
panic("nil output or method passed to poller.NewOutput")
|
||||
}
|
||||
|
||||
inputs = append(inputs, i)
|
||||
}
|
||||
|
||||
// InitializeInputs runs the passed-in initializer method for each input plugin.
|
||||
func (u *UnifiPoller) InitializeInputs() error {
|
||||
inputSync.RLock()
|
||||
defer inputSync.RUnlock()
|
||||
|
||||
for _, input := range inputs {
|
||||
// This must return, or the app locks up here.
|
||||
if err := input.Initialize(u); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Events aggregates log messages (events) from one or more sources.
|
||||
func (u *UnifiPoller) Events(filter *Filter) (*Events, error) {
|
||||
inputSync.RLock()
|
||||
defer inputSync.RUnlock()
|
||||
|
||||
events := Events{}
|
||||
|
||||
for _, input := range inputs {
|
||||
if filter != nil &&
|
||||
filter.Name != "" &&
|
||||
!strings.EqualFold(input.Name, filter.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
e, err := input.Events(filter)
|
||||
if err != nil {
|
||||
return &events, err
|
||||
}
|
||||
|
||||
// Logs is the only member to extend at this time.
|
||||
events.Logs = append(events.Logs, e.Logs...)
|
||||
}
|
||||
|
||||
return &events, nil
|
||||
}
|
||||
|
||||
// Metrics aggregates all the measurements from filtered inputs and returns them.
|
||||
// Passing a null filter returns everything!
|
||||
func (u *UnifiPoller) Metrics(filter *Filter) (*Metrics, error) {
|
||||
inputSync.RLock()
|
||||
defer inputSync.RUnlock()
|
||||
|
||||
metrics := &Metrics{}
|
||||
|
||||
for _, input := range inputs {
|
||||
if filter != nil &&
|
||||
filter.Name != "" &&
|
||||
!strings.EqualFold(input.Name, filter.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := input.Metrics(filter)
|
||||
if err != nil {
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
metrics = AppendMetrics(metrics, m)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// AppendMetrics combines the metrics from two sources.
|
||||
func AppendMetrics(existing *Metrics, m *Metrics) *Metrics {
|
||||
if existing == nil {
|
||||
return m
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return existing
|
||||
}
|
||||
|
||||
existing.SitesDPI = append(existing.SitesDPI, m.SitesDPI...)
|
||||
existing.Sites = append(existing.Sites, m.Sites...)
|
||||
existing.ClientsDPI = append(existing.ClientsDPI, m.ClientsDPI...)
|
||||
existing.RogueAPs = append(existing.RogueAPs, m.RogueAPs...)
|
||||
existing.Clients = append(existing.Clients, m.Clients...)
|
||||
existing.Devices = append(existing.Devices, m.Devices...)
|
||||
|
||||
return existing
|
||||
}
|
||||
|
||||
// Inputs allows output plugins to see the list of loaded input plugins.
|
||||
func (u *UnifiPoller) Inputs() (names []string) {
|
||||
inputSync.RLock()
|
||||
defer inputSync.RUnlock()
|
||||
|
||||
for i := range inputs {
|
||||
names = append(names, inputs[i].Name)
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
package poller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Log the command that called these commands.
// callDepth makes log.Output report the caller of Logf/LogErrorf/LogDebugf
// rather than the logger method itself.
const callDepth = 2

// Logger is passed into input packages so they may write logs.
type Logger interface {
	// Logf writes an informational log entry.
	Logf(m string, v ...interface{})
	// LogErrorf writes an error log entry.
	LogErrorf(m string, v ...interface{})
	// LogDebugf writes a debug log entry.
	LogDebugf(m string, v ...interface{})
}
|
||||
|
||||
// Logf prints a log entry if quiet is false.
|
||||
func (u *UnifiPoller) Logf(m string, v ...interface{}) {
|
||||
if !u.Quiet {
|
||||
_ = log.Output(callDepth, fmt.Sprintf("[INFO] "+m, v...))
|
||||
}
|
||||
}
|
||||
|
||||
// LogDebugf prints a debug log entry if debug is true and quite is false.
|
||||
func (u *UnifiPoller) LogDebugf(m string, v ...interface{}) {
|
||||
if u.Debug && !u.Quiet {
|
||||
_ = log.Output(callDepth, fmt.Sprintf("[DEBUG] "+m, v...))
|
||||
}
|
||||
}
|
||||
|
||||
// LogErrorf prints an error log entry.
|
||||
func (u *UnifiPoller) LogErrorf(m string, v ...interface{}) {
|
||||
_ = log.Output(callDepth, fmt.Sprintf("[ERROR] "+m, v...))
|
||||
}
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
package poller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
	// These are used to keep track of loaded output plugins.
	// outputSync guards outputs; registration locks it, readers RLock it.
	outputs    []*Output    // nolint: gochecknoglobals
	outputSync sync.RWMutex // nolint: gochecknoglobals
	// errNoOutputPlugins is returned when no output plugin registered via NewOutput.
	errNoOutputPlugins = fmt.Errorf("no output plugins imported")
	// errAllOutputStopped is returned once every running output plugin has exited cleanly.
	errAllOutputStopped = fmt.Errorf("all output plugins have stopped, or none enabled")
)
|
||||
|
||||
// Collect is passed into output packages so they may collect metrics to output.
type Collect interface {
	Logger
	Metrics(*Filter) (*Metrics, error)
	Events(*Filter) (*Events, error)
	// These get used by the webserver output plugin.
	Poller() Poller
	Inputs() []string
	Outputs() []string
}

// Output defines the output data for a metric exporter like influx or prometheus.
// Output packages should call NewOutput with this struct in init().
type Output struct {
	Name   string
	Config interface{}         // Each config is passed into an unmarshaller later.
	Method func(Collect) error // Called on startup for each configured output.
}
|
||||
|
||||
// NewOutput should be called by each output package's init function.
|
||||
func NewOutput(o *Output) {
|
||||
outputSync.Lock()
|
||||
defer outputSync.Unlock()
|
||||
|
||||
if o == nil || o.Method == nil {
|
||||
panic("nil output or method passed to poller.NewOutput")
|
||||
}
|
||||
|
||||
outputs = append(outputs, o)
|
||||
}
|
||||
|
||||
// Poller returns the poller config.
// A dereferenced copy is returned so callers cannot mutate the live config.
func (u *UnifiPoller) Poller() Poller {
	return *u.Config.Poller
}
|
||||
|
||||
// InitializeOutputs runs all the configured output plugins.
|
||||
// If none exist, or they all exit an error is returned.
|
||||
func (u *UnifiPoller) InitializeOutputs() error {
|
||||
count, errChan := u.runOutputMethods()
|
||||
defer close(errChan)
|
||||
|
||||
if count == 0 {
|
||||
return errNoOutputPlugins
|
||||
}
|
||||
|
||||
// Wait for and return an error from any output plugin.
|
||||
for err := range errChan {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if count--; count == 0 {
|
||||
return errAllOutputStopped
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *UnifiPoller) runOutputMethods() (int, chan error) {
|
||||
// Output plugin errors go into this channel.
|
||||
err := make(chan error)
|
||||
|
||||
outputSync.RLock()
|
||||
defer outputSync.RUnlock()
|
||||
|
||||
for _, o := range outputs {
|
||||
go func(o *Output) {
|
||||
err <- o.Method(u) // Run each output plugin
|
||||
}(o)
|
||||
}
|
||||
|
||||
return len(outputs), err
|
||||
}
|
||||
|
||||
// Outputs allows other output plugins to see the list of loaded output plugins.
|
||||
func (u *UnifiPoller) Outputs() (names []string) {
|
||||
outputSync.RLock()
|
||||
defer outputSync.RUnlock()
|
||||
|
||||
for i := range outputs {
|
||||
names = append(names, outputs[i].Name)
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
// Package poller provides the CLI interface to setup unifi-poller.
|
||||
package poller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golift.io/version"
|
||||
)
|
||||
|
||||
// New returns a new poller struct.
|
||||
func New() *UnifiPoller {
|
||||
return &UnifiPoller{Config: &Config{Poller: &Poller{}}, Flags: &Flags{}}
|
||||
}
|
||||
|
||||
// Start begins the application from a CLI.
|
||||
// Parses cli flags, parses config file, parses env vars, sets up logging, then:
|
||||
// - dumps a json payload OR - executes Run().
|
||||
func (u *UnifiPoller) Start() error {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(log.LstdFlags)
|
||||
u.Flags.Parse(os.Args[1:])
|
||||
|
||||
if u.Flags.ShowVer {
|
||||
fmt.Println(version.Print(AppName))
|
||||
return nil // don't run anything else w/ version request.
|
||||
}
|
||||
|
||||
if u.Flags.HashPW != "" {
|
||||
return u.PrintPasswordHash()
|
||||
}
|
||||
|
||||
cfile, err := getFirstFile(strings.Split(u.Flags.ConfigFile, ","))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u.Flags.ConfigFile = cfile
|
||||
if u.Flags.DumpJSON == "" { // do not print this when dumping JSON.
|
||||
u.Logf("Loading Configuration File: %s", u.Flags.ConfigFile)
|
||||
}
|
||||
|
||||
// Parse config file and ENV variables.
|
||||
if err := u.ParseConfigs(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return u.Run()
|
||||
}
|
||||
|
||||
// Parse turns CLI arguments into data structures. Called by Start() on startup.
|
||||
func (f *Flags) Parse(args []string) {
|
||||
f.FlagSet = pflag.NewFlagSet(AppName, pflag.ExitOnError)
|
||||
f.Usage = func() {
|
||||
fmt.Printf("Usage: %s [--config=/path/to/up.conf] [--version]", AppName)
|
||||
f.PrintDefaults()
|
||||
}
|
||||
|
||||
f.StringVarP(&f.HashPW, "encrypt", "e", "",
|
||||
"This option bcrypts a provided string. Useful for the webserver password. Use - to be prompted.")
|
||||
f.StringVarP(&f.DumpJSON, "dumpjson", "j", "",
|
||||
"This debug option prints a json payload and exits. See man page for more info.")
|
||||
f.StringVarP(&f.ConfigFile, "config", "c", DefaultConfFile(),
|
||||
"Poller config file path. Separating multiple paths with a comma will load the first config file found.")
|
||||
f.BoolVarP(&f.ShowVer, "version", "v", false, "Print the version and exit.")
|
||||
_ = f.FlagSet.Parse(args) // pflag.ExitOnError means this will never return error.
|
||||
}
|
||||
|
||||
// Run picks a mode and executes the associated functions. This will do one of three things:
// 1. Start the collector routine that polls unifi and reports to influx on an interval. (default)
// 2. Run the collector one time and report the metrics to influxdb. (lambda)
// 3. Start a web server and wait for Prometheus to poll the application for metrics.
func (u *UnifiPoller) Run() error {
	if u.Flags.DumpJSON != "" {
		// JSON dump mode: silence normal logging, poll once, print, exit.
		u.Config.Quiet = true
		if err := u.InitializeInputs(); err != nil {
			return err
		}

		return u.PrintRawMetrics()
	}

	if u.Debug {
		// Debug mode adds file/line and sub-second timestamps to log output.
		log.SetFlags(log.Lshortfile | log.Lmicroseconds | log.Ldate)
		u.LogDebugf("Debug Logging Enabled")
	}

	log.Printf("[INFO] UniFi Poller v%v Starting Up! PID: %d", version.Version, os.Getpid())

	if err := u.InitializeInputs(); err != nil {
		return err
	}

	// InitializeOutputs blocks for the life of the application.
	return u.InitializeOutputs()
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT LICENSE.
|
||||
Copyright (c) 2018-2020 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
# prometheus
|
||||
|
||||
This package provides the interface to turn UniFi measurements into prometheus
|
||||
exported metrics. Requires the poller package for actual UniFi data collection.
|
||||
|
|
@ -0,0 +1,232 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uclient holds the prometheus metric descriptors for UniFi client metrics.
// Created once by descClient and reused for every collection cycle.
type uclient struct {
	Anomalies      *prometheus.Desc
	BytesR         *prometheus.Desc
	CCQ            *prometheus.Desc
	Satisfaction   *prometheus.Desc
	Noise          *prometheus.Desc
	RoamCount      *prometheus.Desc
	RSSI           *prometheus.Desc
	RxBytes        *prometheus.Desc
	RxBytesR       *prometheus.Desc
	RxPackets      *prometheus.Desc
	RxRate         *prometheus.Desc
	Signal         *prometheus.Desc
	TxBytes        *prometheus.Desc
	TxBytesR       *prometheus.Desc
	TxPackets      *prometheus.Desc
	TxRetries      *prometheus.Desc
	TxPower        *prometheus.Desc
	TxRate         *prometheus.Desc
	Uptime         *prometheus.Desc
	WifiTxAttempts *prometheus.Desc
	// NOTE(review): the Wired* descriptors below are not initialized by
	// descClient in this file (exportClient reuses the plain Rx/Tx
	// descriptors for wired clients) — confirm whether they are dead fields.
	WiredRxBytes   *prometheus.Desc
	WiredRxBytesR  *prometheus.Desc
	WiredRxPackets *prometheus.Desc
	WiredTxBytes   *prometheus.Desc
	WiredTxBytesR  *prometheus.Desc
	WiredTxPackets *prometheus.Desc
	DPITxPackets   *prometheus.Desc
	DPIRxPackets   *prometheus.Desc
	DPITxBytes     *prometheus.Desc
	DPIRxBytes     *prometheus.Desc
}
|
||||
|
||||
// descClient builds the prometheus descriptors for client metrics.
// ns is the metric-name prefix (namespace). Three label sets are used:
// labels for all clients, labelW adds radio labels for wireless metrics,
// and labelDPI is a reduced set for deep-packet-inspection metrics.
func descClient(ns string) *uclient {
	labels := []string{
		"name", "mac", "site_name", "gw_name", "sw_name", "vlan",
		"ip", "oui", "network", "sw_port", "ap_name", "source", "wired",
	}
	// Wireless metrics prepend radio labels; index order must match exportClient.
	labelW := append([]string{"radio_name", "radio", "radio_proto", "channel", "essid", "bssid", "radio_desc"}, labels...)
	labelDPI := []string{"name", "mac", "site_name", "source", "category", "application"}

	return &uclient{
		Anomalies:      prometheus.NewDesc(ns+"anomalies", "Client Anomalies", labelW, nil),
		BytesR:         prometheus.NewDesc(ns+"transfer_rate_bytes", "Client Data Rate", labelW, nil),
		CCQ:            prometheus.NewDesc(ns+"ccq_ratio", "Client Connection Quality", labelW, nil),
		Satisfaction:   prometheus.NewDesc(ns+"satisfaction_ratio", "Client Satisfaction", labelW, nil),
		Noise:          prometheus.NewDesc(ns+"noise_db", "Client AP Noise", labelW, nil),
		RoamCount:      prometheus.NewDesc(ns+"roam_count_total", "Client Roam Counter", labelW, nil),
		RSSI:           prometheus.NewDesc(ns+"rssi_db", "Client RSSI", labelW, nil),
		RxBytes:        prometheus.NewDesc(ns+"receive_bytes_total", "Client Receive Bytes", labels, nil),
		RxBytesR:       prometheus.NewDesc(ns+"receive_rate_bytes", "Client Receive Data Rate", labels, nil),
		RxPackets:      prometheus.NewDesc(ns+"receive_packets_total", "Client Receive Packets", labels, nil),
		RxRate:         prometheus.NewDesc(ns+"radio_receive_rate_bps", "Client Receive Rate", labelW, nil),
		Signal:         prometheus.NewDesc(ns+"radio_signal_db", "Client Signal Strength", labelW, nil),
		TxBytes:        prometheus.NewDesc(ns+"transmit_bytes_total", "Client Transmit Bytes", labels, nil),
		TxBytesR:       prometheus.NewDesc(ns+"transmit_rate_bytes", "Client Transmit Data Rate", labels, nil),
		TxPackets:      prometheus.NewDesc(ns+"transmit_packets_total", "Client Transmit Packets", labels, nil),
		TxRetries:      prometheus.NewDesc(ns+"transmit_retries_total", "Client Transmit Retries", labels, nil),
		TxPower:        prometheus.NewDesc(ns+"radio_transmit_power_dbm", "Client Transmit Power", labelW, nil),
		TxRate:         prometheus.NewDesc(ns+"radio_transmit_rate_bps", "Client Transmit Rate", labelW, nil),
		WifiTxAttempts: prometheus.NewDesc(ns+"wifi_attempts_transmit_total", "Client Wifi Transmit Attempts", labelW, nil),
		Uptime:         prometheus.NewDesc(ns+"uptime_seconds", "Client Uptime", labelW, nil),
		DPITxPackets:   prometheus.NewDesc(ns+"dpi_transmit_packets", "Client DPI Transmit Packets", labelDPI, nil),
		DPIRxPackets:   prometheus.NewDesc(ns+"dpi_receive_packets", "Client DPI Receive Packets", labelDPI, nil),
		DPITxBytes:     prometheus.NewDesc(ns+"dpi_transmit_bytes", "Client DPI Transmit Bytes", labelDPI, nil),
		DPIRxBytes:     prometheus.NewDesc(ns+"dpi_receive_bytes", "Client DPI Receive Bytes", labelDPI, nil),
	}
}
|
||||
|
||||
// exportClientDPI emits per-client deep-packet-inspection metrics and
// accumulates per-application and per-category totals into the passed maps.
// v must be a *unifi.DPITable; anything else is logged and skipped.
func (u *promUnifi) exportClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
	s, ok := v.(*unifi.DPITable)
	if !ok {
		u.LogErrorf("invalid type given to ClientsDPI: %T", v)
		return
	}

	for _, dpi := range s.ByApp {
		// Label order must match labelDPI in descClient:
		// name, mac, site_name, source, category, application.
		labelDPI := []string{
			s.Name, s.MAC, s.SiteName, s.SourceName,
			unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
		}

		// Index 5 is the application name, index 4 the category name.
		fillDPIMapTotals(appTotal, labelDPI[5], s.SourceName, s.SiteName, dpi)
		fillDPIMapTotals(catTotal, labelDPI[4], s.SourceName, s.SiteName, dpi)
		// log.Println(labelDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
		r.send([]*metric{
			{u.Client.DPITxPackets, counter, dpi.TxPackets, labelDPI},
			{u.Client.DPIRxPackets, counter, dpi.RxPackets, labelDPI},
			{u.Client.DPITxBytes, counter, dpi.TxBytes, labelDPI},
			{u.Client.DPIRxBytes, counter, dpi.RxBytes, labelDPI},
		})
	}
}
|
||||
|
||||
// exportClient emits prometheus metrics for a single UniFi client.
// Wired clients report only the wired byte/packet counters; wireless
// clients report radio-quality metrics as well. Uptime is sent for both.
func (u *promUnifi) exportClient(r report, c *unifi.Client) {
	// The final "wired" label slot is filled in below once we know the type.
	labels := []string{
		c.Name, c.Mac, c.SiteName, c.GwName, c.SwName, c.Vlan.Txt,
		c.IP, c.Oui, c.Network, c.SwPort.Txt, c.ApName, c.SourceName, "",
	}
	labelW := append([]string{
		c.RadioName, c.Radio, c.RadioProto, c.Channel.Txt, c.Essid, c.Bssid, c.RadioDescription,
	}, labels...)

	if c.IsWired.Val {
		labels[len(labels)-1] = "true"
		labelW[len(labelW)-1] = "true"

		r.send([]*metric{
			{u.Client.RxBytes, counter, c.WiredRxBytes, labels},
			{u.Client.RxBytesR, gauge, c.WiredRxBytesR, labels},
			{u.Client.RxPackets, counter, c.WiredRxPackets, labels},
			{u.Client.TxBytes, counter, c.WiredTxBytes, labels},
			{u.Client.TxBytesR, gauge, c.WiredTxBytesR, labels},
			{u.Client.TxPackets, counter, c.WiredTxPackets, labels},
		})
	} else {
		labels[len(labels)-1] = "false"
		labelW[len(labelW)-1] = "false"

		r.send([]*metric{
			{u.Client.Anomalies, counter, c.Anomalies, labelW},
			// CCQ and Satisfaction are scaled to ratios here.
			{u.Client.CCQ, gauge, float64(c.Ccq) / 1000.0, labelW},
			{u.Client.Satisfaction, gauge, c.Satisfaction.Val / 100.0, labelW},
			{u.Client.Noise, gauge, c.Noise, labelW},
			{u.Client.RoamCount, counter, c.RoamCount, labelW},
			{u.Client.RSSI, gauge, c.Rssi, labelW},
			{u.Client.Signal, gauge, c.Signal, labelW},
			{u.Client.TxPower, gauge, c.TxPower, labelW},
			{u.Client.TxRate, gauge, c.TxRate * 1000, labelW},
			{u.Client.WifiTxAttempts, counter, c.WifiTxAttempts, labelW},
			{u.Client.RxRate, gauge, c.RxRate * 1000, labelW},
			{u.Client.TxRetries, counter, c.TxRetries, labels},
			{u.Client.TxBytes, counter, c.TxBytes, labels},
			{u.Client.TxBytesR, gauge, c.TxBytesR, labels},
			{u.Client.TxPackets, counter, c.TxPackets, labels},
			{u.Client.RxBytes, counter, c.RxBytes, labels},
			{u.Client.RxBytesR, gauge, c.RxBytesR, labels},
			{u.Client.RxPackets, counter, c.RxPackets, labels},
			{u.Client.BytesR, gauge, c.BytesR, labelW},
		})
	}

	r.send([]*metric{{u.Client.Uptime, gauge, c.Uptime, labelW}})
}
|
||||
|
||||
// totalsDPImap: controller, site, name (app/cat name), dpi.
// Nested map keyed controller -> site -> app/category name, holding summed DPI counters.
type totalsDPImap map[string]map[string]map[string]unifi.DPIData
|
||||
|
||||
// fillDPIMapTotals fills in totals for categories and applications. maybe clients too.
|
||||
// This allows less processing in InfluxDB to produce total transfer data per cat or app.
|
||||
func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
|
||||
if _, ok := m[controller]; !ok {
|
||||
m[controller] = make(map[string]map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
if _, ok := m[controller][site]; !ok {
|
||||
m[controller][site] = make(map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
if _, ok := m[controller][site][name]; !ok {
|
||||
m[controller][site][name] = dpi
|
||||
return
|
||||
}
|
||||
|
||||
oldDPI := m[controller][site][name]
|
||||
oldDPI.TxPackets += dpi.TxPackets
|
||||
oldDPI.RxPackets += dpi.RxPackets
|
||||
oldDPI.TxBytes += dpi.TxBytes
|
||||
oldDPI.RxBytes += dpi.RxBytes
|
||||
m[controller][site][name] = oldDPI
|
||||
}
|
||||
|
||||
func (u *promUnifi) exportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
|
||||
type all []struct {
|
||||
kind string
|
||||
val totalsDPImap
|
||||
}
|
||||
// This produces 7000+ metrics per site. Disabled for now.
|
||||
if appTotal != nil {
|
||||
appTotal = nil
|
||||
}
|
||||
// This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
|
||||
a := all{
|
||||
|
||||
{
|
||||
kind: "application",
|
||||
val: appTotal,
|
||||
},
|
||||
|
||||
{
|
||||
kind: "category",
|
||||
val: catTotal,
|
||||
},
|
||||
}
|
||||
|
||||
for _, k := range a {
|
||||
for controller, s := range k.val {
|
||||
for site, c := range s {
|
||||
for name, m := range c {
|
||||
labelDPI := []string{"TOTAL", "TOTAL", site, controller, "TOTAL", "TOTAL"}
|
||||
|
||||
switch k.kind {
|
||||
case "application":
|
||||
labelDPI[5] = name
|
||||
case "category":
|
||||
labelDPI[4] = name
|
||||
case "name":
|
||||
labelDPI[0] = name
|
||||
case "mac":
|
||||
labelDPI[1] = name
|
||||
}
|
||||
|
||||
m := []*metric{
|
||||
{u.Client.DPITxPackets, counter, m.TxPackets, labelDPI},
|
||||
{u.Client.DPIRxPackets, counter, m.RxPackets, labelDPI},
|
||||
{u.Client.DPITxBytes, counter, m.TxBytes, labelDPI},
|
||||
{u.Client.DPIRxBytes, counter, m.RxBytes, labelDPI},
|
||||
}
|
||||
|
||||
r.send(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,362 @@
|
|||
// Package promunifi provides the bridge between unpoller metrics and prometheus.
|
||||
package promunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
promver "github.com/prometheus/common/version"
|
||||
"github.com/unpoller/unifi"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
"golift.io/version"
|
||||
)
|
||||
|
||||
// PluginName is the name of this plugin.
|
||||
const PluginName = "prometheus"
|
||||
|
||||
const (
|
||||
// channel buffer, fits at least one batch.
|
||||
defaultBuffer = 50
|
||||
defaultHTTPListen = "0.0.0.0:9130"
|
||||
// simply fewer letters.
|
||||
counter = prometheus.CounterValue
|
||||
gauge = prometheus.GaugeValue
|
||||
)
|
||||
|
||||
var ErrMetricFetchFailed = fmt.Errorf("metric fetch failed")
|
||||
|
||||
type promUnifi struct {
|
||||
*Config `json:"prometheus" toml:"prometheus" xml:"prometheus" yaml:"prometheus"`
|
||||
Client *uclient
|
||||
Device *unifiDevice
|
||||
UAP *uap
|
||||
USG *usg
|
||||
USW *usw
|
||||
Site *site
|
||||
RogueAP *rogueap
|
||||
// This interface is passed to the Collect() method. The Collect method uses
|
||||
// this interface to retrieve the latest UniFi measurements and export them.
|
||||
Collector poller.Collect
|
||||
}
|
||||
|
||||
// Config is the input (config file) data used to initialize this output plugin.
|
||||
type Config struct {
|
||||
// If non-empty, each of the collected metrics is prefixed by the
|
||||
// provided string and an underscore ("_").
|
||||
Namespace string `json:"namespace" toml:"namespace" xml:"namespace" yaml:"namespace"`
|
||||
HTTPListen string `json:"http_listen" toml:"http_listen" xml:"http_listen" yaml:"http_listen"`
|
||||
// If these are provided, the app will attempt to listen with an SSL connection.
|
||||
SSLCrtPath string `json:"ssl_cert_path" toml:"ssl_cert_path" xml:"ssl_cert_path" yaml:"ssl_cert_path"`
|
||||
SSLKeyPath string `json:"ssl_key_path" toml:"ssl_key_path" xml:"ssl_key_path" yaml:"ssl_key_path"`
|
||||
// Buffer is a channel buffer.
|
||||
// Default is probably 50. Seems fast there; try 1 to see if CPU usage goes down?
|
||||
Buffer int `json:"buffer" toml:"buffer" xml:"buffer" yaml:"buffer"`
|
||||
// If true, any error encountered during collection is reported as an
|
||||
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
|
||||
// and the collected metrics will be incomplete. Possibly, no metrics
|
||||
// will be collected at all.
|
||||
ReportErrors bool `json:"report_errors" toml:"report_errors" xml:"report_errors" yaml:"report_errors"`
|
||||
Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
|
||||
// Save data for dead ports? ie. ports that are down or disabled.
|
||||
DeadPorts bool `json:"dead_ports" toml:"dead_ports" xml:"dead_ports" yaml:"dead_ports"`
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
Desc *prometheus.Desc
|
||||
ValueType prometheus.ValueType
|
||||
Value interface{}
|
||||
Labels []string
|
||||
}
|
||||
|
||||
// Report accumulates counters that are printed to a log line.
|
||||
type Report struct {
|
||||
*Config
|
||||
Total int // Total count of metrics recorded.
|
||||
Errors int // Total count of errors recording metrics.
|
||||
Zeros int // Total count of metrics equal to zero.
|
||||
USG int // Total count of USG devices.
|
||||
USW int // Total count of USW devices.
|
||||
UAP int // Total count of UAP devices.
|
||||
UDM int // Total count of UDM devices.
|
||||
UXG int // Total count of UXG devices.
|
||||
Metrics *poller.Metrics // Metrics collected and recorded.
|
||||
Elapsed time.Duration // Duration elapsed collecting and exporting.
|
||||
Fetch time.Duration // Duration elapsed making controller requests.
|
||||
Start time.Time // Time collection began.
|
||||
ch chan []*metric
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// target is used for targeted (sometimes dynamic) metrics scrapes.
|
||||
type target struct {
|
||||
*poller.Filter
|
||||
u *promUnifi
|
||||
}
|
||||
|
||||
// init is how this modular code is initialized by the main app.
|
||||
// This module adds itself as an output module to the poller core.
|
||||
func init() { // nolint: gochecknoinits
|
||||
u := &promUnifi{Config: &Config{}}
|
||||
|
||||
poller.NewOutput(&poller.Output{
|
||||
Name: PluginName,
|
||||
Config: u,
|
||||
Method: u.Run,
|
||||
})
|
||||
}
|
||||
|
||||
// Run creates the collectors and starts the web server up.
|
||||
// Should be run in a Go routine. Returns nil if not configured.
|
||||
func (u *promUnifi) Run(c poller.Collect) error {
|
||||
if u.Collector = c; u.Config == nil || u.Disable {
|
||||
u.Logf("Prometheus config missing (or disabled), Prometheus HTTP listener disabled!")
|
||||
return nil
|
||||
}
|
||||
|
||||
u.Namespace = strings.Trim(strings.ReplaceAll(u.Namespace, "-", "_"), "_")
|
||||
if u.Namespace == "" {
|
||||
u.Namespace = strings.ReplaceAll(poller.AppName, "-", "")
|
||||
}
|
||||
|
||||
if u.HTTPListen == "" {
|
||||
u.HTTPListen = defaultHTTPListen
|
||||
}
|
||||
|
||||
if u.Buffer == 0 {
|
||||
u.Buffer = defaultBuffer
|
||||
}
|
||||
|
||||
u.Client = descClient(u.Namespace + "_client_")
|
||||
u.Device = descDevice(u.Namespace + "_device_") // stats for all device types.
|
||||
u.UAP = descUAP(u.Namespace + "_device_")
|
||||
u.USG = descUSG(u.Namespace + "_device_")
|
||||
u.USW = descUSW(u.Namespace + "_device_")
|
||||
u.Site = descSite(u.Namespace + "_site_")
|
||||
u.RogueAP = descRogueAP(u.Namespace + "_rogueap_")
|
||||
|
||||
mux := http.NewServeMux()
|
||||
promver.Version = version.Version
|
||||
promver.Revision = version.Revision
|
||||
promver.Branch = version.Branch
|
||||
|
||||
webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: u.Config})
|
||||
prometheus.MustRegister(promver.NewCollector(u.Namespace))
|
||||
prometheus.MustRegister(u)
|
||||
mux.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer,
|
||||
promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
|
||||
))
|
||||
mux.HandleFunc("/scrape", u.ScrapeHandler)
|
||||
mux.HandleFunc("/", u.DefaultHandler)
|
||||
|
||||
switch u.SSLKeyPath == "" && u.SSLCrtPath == "" {
|
||||
case true:
|
||||
u.Logf("Prometheus exported at http://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
|
||||
return http.ListenAndServe(u.HTTPListen, mux)
|
||||
default:
|
||||
u.Logf("Prometheus exported at https://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
|
||||
return http.ListenAndServeTLS(u.HTTPListen, u.SSLCrtPath, u.SSLKeyPath, mux)
|
||||
}
|
||||
}
|
||||
|
||||
// ScrapeHandler allows prometheus to scrape a single source, instead of all sources.
|
||||
func (u *promUnifi) ScrapeHandler(w http.ResponseWriter, r *http.Request) {
|
||||
t := &target{u: u, Filter: &poller.Filter{
|
||||
Name: r.URL.Query().Get("input"), // "unifi"
|
||||
Path: r.URL.Query().Get("target"), // url: "https://127.0.0.1:8443"
|
||||
}}
|
||||
|
||||
if t.Name == "" {
|
||||
t.Name = "unifi" // the default
|
||||
}
|
||||
|
||||
if pathOld := r.URL.Query().Get("path"); pathOld != "" {
|
||||
u.LogErrorf("deprecated 'path' parameter used; update your config to use 'target'")
|
||||
|
||||
if t.Path == "" {
|
||||
t.Path = pathOld
|
||||
}
|
||||
}
|
||||
|
||||
if roleOld := r.URL.Query().Get("role"); roleOld != "" {
|
||||
u.LogErrorf("deprecated 'role' parameter used; update your config to use 'target'")
|
||||
|
||||
if t.Path == "" {
|
||||
t.Path = roleOld
|
||||
}
|
||||
}
|
||||
|
||||
if t.Path == "" {
|
||||
u.LogErrorf("'target' parameter missing on scrape from %v", r.RemoteAddr)
|
||||
http.Error(w, "'target' parameter must be specified: configured OR unconfigured url", 400)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
registry := prometheus.NewRegistry()
|
||||
|
||||
registry.MustRegister(t)
|
||||
promhttp.HandlerFor(
|
||||
registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
|
||||
).ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (u *promUnifi) DefaultHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(poller.AppName + "\n"))
|
||||
}
|
||||
|
||||
// Describe satisfies the prometheus Collector. This returns all of the
|
||||
// metric descriptions that this packages produces.
|
||||
func (t *target) Describe(ch chan<- *prometheus.Desc) {
|
||||
t.u.Describe(ch)
|
||||
}
|
||||
|
||||
// Describe satisfies the prometheus Collector. This returns all of the
|
||||
// metric descriptions that this packages produces.
|
||||
func (u *promUnifi) Describe(ch chan<- *prometheus.Desc) {
|
||||
for _, f := range []interface{}{u.Client, u.Device, u.UAP, u.USG, u.USW, u.Site} {
|
||||
v := reflect.Indirect(reflect.ValueOf(f))
|
||||
|
||||
// Loop each struct member and send it to the provided channel.
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
desc, ok := v.Field(i).Interface().(*prometheus.Desc)
|
||||
if ok && desc != nil {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect satisfies the prometheus Collector. This runs for a single controller poll.
|
||||
func (t *target) Collect(ch chan<- prometheus.Metric) {
|
||||
t.u.collect(ch, t.Filter)
|
||||
}
|
||||
|
||||
// Collect satisfies the prometheus Collector. This runs the input method to get
|
||||
// the current metrics (from another package) then exports them for prometheus.
|
||||
func (u *promUnifi) Collect(ch chan<- prometheus.Metric) {
|
||||
u.collect(ch, nil)
|
||||
}
|
||||
|
||||
func (u *promUnifi) collect(ch chan<- prometheus.Metric, filter *poller.Filter) {
|
||||
var err error
|
||||
|
||||
r := &Report{
|
||||
Config: u.Config,
|
||||
ch: make(chan []*metric, u.Config.Buffer),
|
||||
Start: time.Now(),
|
||||
}
|
||||
defer r.close()
|
||||
|
||||
r.Metrics, err = u.Collector.Metrics(filter)
|
||||
r.Fetch = time.Since(r.Start)
|
||||
|
||||
if err != nil {
|
||||
r.error(ch, prometheus.NewInvalidDesc(err), ErrMetricFetchFailed)
|
||||
u.LogErrorf("metric fetch failed: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Pass Report interface into our collecting and reporting methods.
|
||||
go u.exportMetrics(r, ch, r.ch)
|
||||
u.loopExports(r)
|
||||
}
|
||||
|
||||
// This is closely tied to the method above with a sync.WaitGroup.
|
||||
// This method runs in a go routine and exits when the channel closes.
|
||||
// This is where our channels connects to the prometheus channel.
|
||||
func (u *promUnifi) exportMetrics(r report, ch chan<- prometheus.Metric, ourChan chan []*metric) {
|
||||
descs := make(map[*prometheus.Desc]bool) // used as a counter
|
||||
defer r.report(u, descs)
|
||||
|
||||
for newMetrics := range ourChan {
|
||||
for _, m := range newMetrics {
|
||||
descs[m.Desc] = true
|
||||
|
||||
switch v := m.Value.(type) {
|
||||
case unifi.FlexInt:
|
||||
ch <- r.export(m, v.Val)
|
||||
case float64:
|
||||
ch <- r.export(m, v)
|
||||
case int64:
|
||||
ch <- r.export(m, float64(v))
|
||||
case int:
|
||||
ch <- r.export(m, float64(v))
|
||||
default:
|
||||
r.error(ch, m.Desc, fmt.Sprintf("not a number: %v", m.Value))
|
||||
}
|
||||
}
|
||||
|
||||
r.done()
|
||||
}
|
||||
}
|
||||
|
||||
func (u *promUnifi) loopExports(r report) {
|
||||
m := r.metrics()
|
||||
|
||||
for _, s := range m.RogueAPs {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.Sites {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.SitesDPI {
|
||||
u.exportSiteDPI(r, s)
|
||||
}
|
||||
|
||||
for _, c := range m.Clients {
|
||||
u.switchExport(r, c)
|
||||
}
|
||||
|
||||
for _, d := range m.Devices {
|
||||
u.switchExport(r, d)
|
||||
}
|
||||
|
||||
appTotal := make(totalsDPImap)
|
||||
catTotal := make(totalsDPImap)
|
||||
|
||||
for _, c := range m.ClientsDPI {
|
||||
u.exportClientDPI(r, c, appTotal, catTotal)
|
||||
}
|
||||
|
||||
u.exportClientDPItotals(r, appTotal, catTotal)
|
||||
}
|
||||
|
||||
func (u *promUnifi) switchExport(r report, v interface{}) {
|
||||
switch v := v.(type) {
|
||||
case *unifi.RogueAP:
|
||||
// r.addRogueAP()
|
||||
u.exportRogueAP(r, v)
|
||||
case *unifi.UAP:
|
||||
r.addUAP()
|
||||
u.exportUAP(r, v)
|
||||
case *unifi.USW:
|
||||
r.addUSW()
|
||||
u.exportUSW(r, v)
|
||||
case *unifi.USG:
|
||||
r.addUSG()
|
||||
u.exportUSG(r, v)
|
||||
case *unifi.UXG:
|
||||
r.addUXG()
|
||||
u.exportUXG(r, v)
|
||||
case *unifi.UDM:
|
||||
r.addUDM()
|
||||
u.exportUDM(r, v)
|
||||
case *unifi.Site:
|
||||
u.exportSite(r, v)
|
||||
case *unifi.Client:
|
||||
u.exportClient(r, v)
|
||||
default:
|
||||
u.LogErrorf("invalid type: %T", v)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/webserver"
|
||||
)
|
||||
|
||||
// Logf logs a message.
|
||||
func (u *promUnifi) Logf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "info"},
|
||||
})
|
||||
u.Collector.Logf(msg, v...)
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (u *promUnifi) LogErrorf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "error"},
|
||||
})
|
||||
u.Collector.LogErrorf(msg, v...)
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (u *promUnifi) LogDebugf(msg string, v ...interface{}) {
|
||||
webserver.NewOutputEvent(PluginName, PluginName, &webserver.Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "debug"},
|
||||
})
|
||||
u.Collector.LogDebugf(msg, v...)
|
||||
}
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
// This file contains the report interface.
|
||||
// This interface can be mocked and overridden for tests.
|
||||
|
||||
// report is an internal interface used to "process metrics".
|
||||
type report interface {
|
||||
done()
|
||||
send([]*metric)
|
||||
metrics() *poller.Metrics
|
||||
report(c poller.Logger, descs map[*prometheus.Desc]bool)
|
||||
export(m *metric, v float64) prometheus.Metric
|
||||
error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{})
|
||||
addUDM()
|
||||
addUXG()
|
||||
addUSG()
|
||||
addUAP()
|
||||
addUSW()
|
||||
}
|
||||
|
||||
// Satisfy gomnd.
|
||||
const oneDecimalPoint = 10.0
|
||||
|
||||
func (r *Report) done() {
|
||||
r.wg.Done()
|
||||
}
|
||||
|
||||
func (r *Report) send(m []*metric) {
|
||||
r.wg.Add(1) // notlint: gomnd
|
||||
r.ch <- m
|
||||
}
|
||||
|
||||
func (r *Report) metrics() *poller.Metrics {
|
||||
return r.Metrics
|
||||
}
|
||||
|
||||
func (r *Report) report(c poller.Logger, descs map[*prometheus.Desc]bool) {
|
||||
m := r.Metrics
|
||||
|
||||
c.Logf("UniFi Measurements Exported. Site: %d, Client: %d, "+
|
||||
"UAP: %d, USG/UDM: %d, USW: %d, DPI Site/Client: %d/%d, Desc: %d, "+
|
||||
"Metric: %d, Err: %d, 0s: %d, Req/Total: %v / %v",
|
||||
len(m.Sites), len(m.Clients), r.UAP, r.UDM+r.USG+r.UXG, r.USW, len(m.SitesDPI),
|
||||
len(m.ClientsDPI), len(descs), r.Total, r.Errors, r.Zeros,
|
||||
r.Fetch.Round(time.Millisecond/oneDecimalPoint),
|
||||
r.Elapsed.Round(time.Millisecond/oneDecimalPoint))
|
||||
}
|
||||
|
||||
func (r *Report) export(m *metric, v float64) prometheus.Metric {
|
||||
r.Total++
|
||||
|
||||
if v == 0 {
|
||||
r.Zeros++
|
||||
}
|
||||
|
||||
return prometheus.MustNewConstMetric(m.Desc, m.ValueType, v, m.Labels...)
|
||||
}
|
||||
|
||||
func (r *Report) error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{}) {
|
||||
r.Errors++
|
||||
|
||||
if r.ReportErrors {
|
||||
ch <- prometheus.NewInvalidMetric(d, fmt.Errorf("error: %v", v)) // nolint: goerr113
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Report) addUSW() {
|
||||
r.USW++
|
||||
}
|
||||
|
||||
func (r *Report) addUAP() {
|
||||
r.UAP++
|
||||
}
|
||||
|
||||
func (r *Report) addUSG() {
|
||||
r.USG++
|
||||
}
|
||||
|
||||
func (r *Report) addUDM() {
|
||||
r.UDM++
|
||||
}
|
||||
|
||||
func (r *Report) addUXG() {
|
||||
r.UXG++
|
||||
}
|
||||
|
||||
// close is not part of the interface.
|
||||
func (r *Report) close() {
|
||||
r.wg.Wait()
|
||||
r.Elapsed = time.Since(r.Start)
|
||||
close(r.ch)
|
||||
}
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
type site struct {
|
||||
NumUser *prometheus.Desc
|
||||
NumGuest *prometheus.Desc
|
||||
NumIot *prometheus.Desc
|
||||
TxBytesR *prometheus.Desc
|
||||
RxBytesR *prometheus.Desc
|
||||
NumAp *prometheus.Desc
|
||||
NumAdopted *prometheus.Desc
|
||||
NumDisabled *prometheus.Desc
|
||||
NumDisconnected *prometheus.Desc
|
||||
NumPending *prometheus.Desc
|
||||
NumGw *prometheus.Desc
|
||||
NumSw *prometheus.Desc
|
||||
NumSta *prometheus.Desc
|
||||
Latency *prometheus.Desc
|
||||
Drops *prometheus.Desc
|
||||
Uptime *prometheus.Desc
|
||||
XputUp *prometheus.Desc
|
||||
XputDown *prometheus.Desc
|
||||
SpeedtestPing *prometheus.Desc
|
||||
RemoteUserNumActive *prometheus.Desc
|
||||
RemoteUserNumInactive *prometheus.Desc
|
||||
RemoteUserRxBytes *prometheus.Desc
|
||||
RemoteUserTxBytes *prometheus.Desc
|
||||
RemoteUserRxPackets *prometheus.Desc
|
||||
RemoteUserTxPackets *prometheus.Desc
|
||||
DPITxPackets *prometheus.Desc
|
||||
DPIRxPackets *prometheus.Desc
|
||||
DPITxBytes *prometheus.Desc
|
||||
DPIRxBytes *prometheus.Desc
|
||||
}
|
||||
|
||||
func descSite(ns string) *site {
|
||||
labels := []string{"subsystem", "status", "site_name", "source"}
|
||||
labelDPI := []string{"category", "application", "site_name", "source"}
|
||||
nd := prometheus.NewDesc
|
||||
|
||||
return &site{
|
||||
NumUser: nd(ns+"users", "Number of Users", labels, nil),
|
||||
NumGuest: nd(ns+"guests", "Number of Guests", labels, nil),
|
||||
NumIot: nd(ns+"iots", "Number of IoT Devices", labels, nil),
|
||||
TxBytesR: nd(ns+"transmit_rate_bytes", "Bytes Transmit Rate", labels, nil),
|
||||
RxBytesR: nd(ns+"receive_rate_bytes", "Bytes Receive Rate", labels, nil),
|
||||
NumAp: nd(ns+"aps", "Access Point Count", labels, nil),
|
||||
NumAdopted: nd(ns+"adopted", "Adoption Count", labels, nil),
|
||||
NumDisabled: nd(ns+"disabled", "Disabled Count", labels, nil),
|
||||
NumDisconnected: nd(ns+"disconnected", "Disconnected Count", labels, nil),
|
||||
NumPending: nd(ns+"pending", "Pending Count", labels, nil),
|
||||
NumGw: nd(ns+"gateways", "Gateway Count", labels, nil),
|
||||
NumSw: nd(ns+"switches", "Switch Count", labels, nil),
|
||||
NumSta: nd(ns+"stations", "Station Count", labels, nil),
|
||||
Latency: nd(ns+"latency_seconds", "Latency", labels, nil),
|
||||
Uptime: nd(ns+"uptime_seconds", "Uptime", labels, nil),
|
||||
Drops: nd(ns+"intenet_drops_total", "Internet (WAN) Disconnections", labels, nil),
|
||||
XputUp: nd(ns+"xput_up_rate", "Speedtest Upload", labels, nil),
|
||||
XputDown: nd(ns+"xput_down_rate", "Speedtest Download", labels, nil),
|
||||
SpeedtestPing: nd(ns+"speedtest_ping", "Speedtest Ping", labels, nil),
|
||||
RemoteUserNumActive: nd(ns+"remote_user_active", "Remote Users Active", labels, nil),
|
||||
RemoteUserNumInactive: nd(ns+"remote_user_inactive", "Remote Users Inactive", labels, nil),
|
||||
RemoteUserRxBytes: nd(ns+"remote_user_receive_bytes_total", "Remote Users Receive Bytes", labels, nil),
|
||||
RemoteUserTxBytes: nd(ns+"remote_user_transmit_bytes_total", "Remote Users Transmit Bytes", labels, nil),
|
||||
RemoteUserRxPackets: nd(ns+"remote_user_receive_packets_total", "Remote Users Receive Packets", labels, nil),
|
||||
RemoteUserTxPackets: nd(ns+"remote_user_transmit_packets_total", "Remote Users Transmit Packets", labels, nil),
|
||||
DPITxPackets: nd(ns+"dpi_transmit_packets", "Site DPI Transmit Packets", labelDPI, nil),
|
||||
DPIRxPackets: nd(ns+"dpi_receive_packets", "Site DPI Receive Packets", labelDPI, nil),
|
||||
DPITxBytes: nd(ns+"dpi_transmit_bytes", "Site DPI Transmit Bytes", labelDPI, nil),
|
||||
DPIRxBytes: nd(ns+"dpi_receive_bytes", "Site DPI Receive Bytes", labelDPI, nil),
|
||||
}
|
||||
}
|
||||
|
||||
func (u *promUnifi) exportSiteDPI(r report, v interface{}) {
|
||||
s, ok := v.(*unifi.DPITable)
|
||||
if !ok {
|
||||
u.LogErrorf("invalid type given to SiteDPI: %T", v)
|
||||
return
|
||||
}
|
||||
|
||||
for _, dpi := range s.ByApp {
|
||||
labelDPI := []string{unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App), s.SiteName, s.SourceName}
|
||||
|
||||
// log.Println(labelsDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
|
||||
r.send([]*metric{
|
||||
{u.Site.DPITxPackets, gauge, dpi.TxPackets, labelDPI},
|
||||
{u.Site.DPIRxPackets, gauge, dpi.RxPackets, labelDPI},
|
||||
{u.Site.DPITxBytes, gauge, dpi.TxBytes, labelDPI},
|
||||
{u.Site.DPIRxBytes, gauge, dpi.RxBytes, labelDPI},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (u *promUnifi) exportSite(r report, s *unifi.Site) {
|
||||
for _, h := range s.Health {
|
||||
switch labels := []string{h.Subsystem, h.Status, s.SiteName, s.SourceName}; labels[0] {
|
||||
case "www":
|
||||
r.send([]*metric{
|
||||
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
|
||||
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
|
||||
{u.Site.Uptime, gauge, h.Uptime, labels},
|
||||
{u.Site.Latency, gauge, h.Latency.Val / 1000, labels},
|
||||
{u.Site.XputUp, gauge, h.XputUp, labels},
|
||||
{u.Site.XputDown, gauge, h.XputDown, labels},
|
||||
{u.Site.SpeedtestPing, gauge, h.SpeedtestPing, labels},
|
||||
{u.Site.Drops, counter, h.Drops, labels},
|
||||
})
|
||||
case "wlan":
|
||||
r.send([]*metric{
|
||||
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
|
||||
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
|
||||
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
|
||||
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
|
||||
{u.Site.NumPending, gauge, h.NumPending, labels},
|
||||
{u.Site.NumUser, gauge, h.NumUser, labels},
|
||||
{u.Site.NumGuest, gauge, h.NumGuest, labels},
|
||||
{u.Site.NumIot, gauge, h.NumIot, labels},
|
||||
{u.Site.NumAp, gauge, h.NumAp, labels},
|
||||
{u.Site.NumDisabled, gauge, h.NumDisabled, labels},
|
||||
})
|
||||
case "wan":
|
||||
r.send([]*metric{
|
||||
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
|
||||
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
|
||||
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
|
||||
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
|
||||
{u.Site.NumPending, gauge, h.NumPending, labels},
|
||||
{u.Site.NumGw, gauge, h.NumGw, labels},
|
||||
{u.Site.NumSta, gauge, h.NumSta, labels},
|
||||
})
|
||||
case "lan":
|
||||
r.send([]*metric{
|
||||
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
|
||||
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
|
||||
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
|
||||
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
|
||||
{u.Site.NumPending, gauge, h.NumPending, labels},
|
||||
{u.Site.NumUser, gauge, h.NumUser, labels},
|
||||
{u.Site.NumGuest, gauge, h.NumGuest, labels},
|
||||
{u.Site.NumIot, gauge, h.NumIot, labels},
|
||||
{u.Site.NumSw, gauge, h.NumSw, labels},
|
||||
})
|
||||
case "vpn":
|
||||
r.send([]*metric{
|
||||
{u.Site.RemoteUserNumActive, gauge, h.RemoteUserNumActive, labels},
|
||||
{u.Site.RemoteUserNumInactive, gauge, h.RemoteUserNumInactive, labels},
|
||||
{u.Site.RemoteUserRxBytes, counter, h.RemoteUserRxBytes, labels},
|
||||
{u.Site.RemoteUserTxBytes, counter, h.RemoteUserTxBytes, labels},
|
||||
{u.Site.RemoteUserRxPackets, counter, h.RemoteUserRxPackets, labels},
|
||||
{u.Site.RemoteUserTxPackets, counter, h.RemoteUserTxPackets, labels},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,376 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
type uap struct {
|
||||
// Ap Traffic Stats
|
||||
ApWifiTxDropped *prometheus.Desc
|
||||
ApRxErrors *prometheus.Desc
|
||||
ApRxDropped *prometheus.Desc
|
||||
ApRxFrags *prometheus.Desc
|
||||
ApRxCrypts *prometheus.Desc
|
||||
ApTxPackets *prometheus.Desc
|
||||
ApTxBytes *prometheus.Desc
|
||||
ApTxErrors *prometheus.Desc
|
||||
ApTxDropped *prometheus.Desc
|
||||
ApTxRetries *prometheus.Desc
|
||||
ApRxPackets *prometheus.Desc
|
||||
ApRxBytes *prometheus.Desc
|
||||
WifiTxAttempts *prometheus.Desc
|
||||
MacFilterRejections *prometheus.Desc
|
||||
// VAP Stats
|
||||
VAPCcq *prometheus.Desc
|
||||
VAPMacFilterRejections *prometheus.Desc
|
||||
VAPNumSatisfactionSta *prometheus.Desc
|
||||
VAPAvgClientSignal *prometheus.Desc
|
||||
VAPSatisfaction *prometheus.Desc
|
||||
VAPSatisfactionNow *prometheus.Desc
|
||||
VAPDNSAvgLatency *prometheus.Desc
|
||||
VAPRxBytes *prometheus.Desc
|
||||
VAPRxCrypts *prometheus.Desc
|
||||
VAPRxDropped *prometheus.Desc
|
||||
VAPRxErrors *prometheus.Desc
|
||||
VAPRxFrags *prometheus.Desc
|
||||
VAPRxNwids *prometheus.Desc
|
||||
VAPRxPackets *prometheus.Desc
|
||||
VAPTxBytes *prometheus.Desc
|
||||
VAPTxDropped *prometheus.Desc
|
||||
VAPTxErrors *prometheus.Desc
|
||||
VAPTxPackets *prometheus.Desc
|
||||
VAPTxPower *prometheus.Desc
|
||||
VAPTxRetries *prometheus.Desc
|
||||
VAPTxCombinedRetries *prometheus.Desc
|
||||
VAPTxDataMpduBytes *prometheus.Desc
|
||||
VAPTxRtsRetries *prometheus.Desc
|
||||
VAPTxSuccess *prometheus.Desc
|
||||
VAPTxTotal *prometheus.Desc
|
||||
VAPTxGoodbytes *prometheus.Desc
|
||||
VAPTxLatAvg *prometheus.Desc
|
||||
VAPTxLatMax *prometheus.Desc
|
||||
VAPTxLatMin *prometheus.Desc
|
||||
VAPRxGoodbytes *prometheus.Desc
|
||||
VAPRxLatAvg *prometheus.Desc
|
||||
VAPRxLatMax *prometheus.Desc
|
||||
VAPRxLatMin *prometheus.Desc
|
||||
VAPWifiTxLatencyMovAvg *prometheus.Desc
|
||||
VAPWifiTxLatencyMovMax *prometheus.Desc
|
||||
VAPWifiTxLatencyMovMin *prometheus.Desc
|
||||
VAPWifiTxLatencyMovTotal *prometheus.Desc
|
||||
VAPWifiTxLatencyMovCount *prometheus.Desc
|
||||
// Radio Stats
|
||||
RadioCurrentAntennaGain *prometheus.Desc
|
||||
RadioHt *prometheus.Desc
|
||||
RadioMaxTxpower *prometheus.Desc
|
||||
RadioMinTxpower *prometheus.Desc
|
||||
RadioNss *prometheus.Desc
|
||||
RadioRadioCaps *prometheus.Desc
|
||||
RadioTxPower *prometheus.Desc
|
||||
RadioAstBeXmit *prometheus.Desc
|
||||
RadioChannel *prometheus.Desc
|
||||
RadioCuSelfRx *prometheus.Desc
|
||||
RadioCuSelfTx *prometheus.Desc
|
||||
RadioCuTotal *prometheus.Desc
|
||||
RadioExtchannel *prometheus.Desc
|
||||
RadioGain *prometheus.Desc
|
||||
RadioNumSta *prometheus.Desc
|
||||
RadioTxPackets *prometheus.Desc
|
||||
RadioTxRetries *prometheus.Desc
|
||||
}
|
||||
|
||||
type rogueap struct {
|
||||
Age *prometheus.Desc
|
||||
BW *prometheus.Desc
|
||||
CenterFreq *prometheus.Desc
|
||||
Channel *prometheus.Desc
|
||||
Freq *prometheus.Desc
|
||||
Noise *prometheus.Desc
|
||||
RSSI *prometheus.Desc
|
||||
RSSIAge *prometheus.Desc
|
||||
Signal *prometheus.Desc
|
||||
}
|
||||
|
||||
func descRogueAP(ns string) *rogueap {
|
||||
label := []string{
|
||||
"security", "oui", "band", "mac", "ap_mac", "radio", "radio_name", "site_name", "name", "source",
|
||||
}
|
||||
|
||||
return &rogueap{
|
||||
Age: prometheus.NewDesc(ns+"age", "RogueAP Age", label, nil),
|
||||
BW: prometheus.NewDesc(ns+"bw", "RogueAP BW", label, nil),
|
||||
CenterFreq: prometheus.NewDesc(ns+"center_freq", "RogueAP Center Frequency", label, nil),
|
||||
Channel: prometheus.NewDesc(ns+"channel", "RogueAP Channel", label, nil),
|
||||
Freq: prometheus.NewDesc(ns+"frequency", "RogueAP Frequency", label, nil),
|
||||
Noise: prometheus.NewDesc(ns+"noise", "RogueAP Noise", label, nil),
|
||||
RSSI: prometheus.NewDesc(ns+"rssi", "RogueAP RSSI", label, nil),
|
||||
RSSIAge: prometheus.NewDesc(ns+"rssi_age", "RogueAP RSSI Age", label, nil),
|
||||
Signal: prometheus.NewDesc(ns+"signal", "RogueAP Signal", label, nil),
|
||||
}
|
||||
}
|
||||
|
||||
func descUAP(ns string) *uap { // nolint: funlen
|
||||
labelA := []string{"stat", "site_name", "name", "source"} // stat + labels[1:]
|
||||
labelV := []string{"vap_name", "bssid", "radio", "radio_name", "essid", "usage", "site_name", "name", "source"}
|
||||
labelR := []string{"radio_name", "radio", "site_name", "name", "source"}
|
||||
nd := prometheus.NewDesc
|
||||
|
||||
return &uap{
|
||||
// 3x each - stat table: total, guest, user
|
||||
ApWifiTxDropped: nd(ns+"stat_wifi_transmt_dropped_total", "Wifi Transmissions Dropped", labelA, nil),
|
||||
ApRxErrors: nd(ns+"stat_receive_errors_total", "Receive Errors", labelA, nil),
|
||||
ApRxDropped: nd(ns+"stat_receive_dropped_total", "Receive Dropped", labelA, nil),
|
||||
ApRxFrags: nd(ns+"stat_receive_frags_total", "Received Frags", labelA, nil),
|
||||
ApRxCrypts: nd(ns+"stat_receive_crypts_total", "Receive Crypts", labelA, nil),
|
||||
ApTxPackets: nd(ns+"stat_transmit_packets_total", "Transmit Packets", labelA, nil),
|
||||
ApTxBytes: nd(ns+"stat_transmit_bytes_total", "Transmit Bytes", labelA, nil),
|
||||
ApTxErrors: nd(ns+"stat_transmit_errors_total", "Transmit Errors", labelA, nil),
|
||||
ApTxDropped: nd(ns+"stat_transmit_dropped_total", "Transmit Dropped", labelA, nil),
|
||||
ApTxRetries: nd(ns+"stat_retries_tx_total", "Transmit Retries", labelA, nil),
|
||||
ApRxPackets: nd(ns+"stat_receive_packets_total", "Receive Packets", labelA, nil),
|
||||
ApRxBytes: nd(ns+"stat_receive_bytes_total", "Receive Bytes", labelA, nil),
|
||||
WifiTxAttempts: nd(ns+"stat_wifi_transmit_attempts_total", "Wifi Transmission Attempts", labelA, nil),
|
||||
MacFilterRejections: nd(ns+"stat_mac_filter_rejects_total", "MAC Filter Rejections", labelA, nil),
|
||||
// N each - 1 per Virtual AP (VAP)
|
||||
VAPCcq: nd(ns+"vap_ccq_ratio", "VAP Client Connection Quality", labelV, nil),
|
||||
VAPMacFilterRejections: nd(ns+"vap_mac_filter_rejects_total", "VAP MAC Filter Rejections", labelV, nil),
|
||||
VAPNumSatisfactionSta: nd(ns+"vap_satisfaction_stations", "VAP Number Satisifaction Stations", labelV, nil),
|
||||
VAPAvgClientSignal: nd(ns+"vap_average_client_signal", "VAP Average Client Signal", labelV, nil),
|
||||
VAPSatisfaction: nd(ns+"vap_satisfaction_ratio", "VAP Satisfaction", labelV, nil),
|
||||
VAPSatisfactionNow: nd(ns+"vap_satisfaction_now_ratio", "VAP Satisfaction Now", labelV, nil),
|
||||
VAPDNSAvgLatency: nd(ns+"vap_dns_latency_average_seconds", "VAP DNS Latency Average", labelV, nil),
|
||||
VAPRxBytes: nd(ns+"vap_receive_bytes_total", "VAP Bytes Received", labelV, nil),
|
||||
VAPRxCrypts: nd(ns+"vap_receive_crypts_total", "VAP Crypts Received", labelV, nil),
|
||||
VAPRxDropped: nd(ns+"vap_receive_dropped_total", "VAP Dropped Received", labelV, nil),
|
||||
VAPRxErrors: nd(ns+"vap_receive_errors_total", "VAP Errors Received", labelV, nil),
|
||||
VAPRxFrags: nd(ns+"vap_receive_frags_total", "VAP Frags Received", labelV, nil),
|
||||
VAPRxNwids: nd(ns+"vap_receive_nwids_total", "VAP Nwids Received", labelV, nil),
|
||||
VAPRxPackets: nd(ns+"vap_receive_packets_total", "VAP Packets Received", labelV, nil),
|
||||
VAPTxBytes: nd(ns+"vap_transmit_bytes_total", "VAP Bytes Transmitted", labelV, nil),
|
||||
VAPTxDropped: nd(ns+"vap_transmit_dropped_total", "VAP Dropped Transmitted", labelV, nil),
|
||||
VAPTxErrors: nd(ns+"vap_transmit_errors_total", "VAP Errors Transmitted", labelV, nil),
|
||||
VAPTxPackets: nd(ns+"vap_transmit_packets_total", "VAP Packets Transmitted", labelV, nil),
|
||||
VAPTxPower: nd(ns+"vap_transmit_power", "VAP Transmit Power", labelV, nil),
|
||||
VAPTxRetries: nd(ns+"vap_transmit_retries_total", "VAP Retries Transmitted", labelV, nil),
|
||||
VAPTxCombinedRetries: nd(ns+"vap_transmit_retries_combined_total", "VAP Retries Combined Tx", labelV, nil),
|
||||
VAPTxDataMpduBytes: nd(ns+"vap_data_mpdu_transmit_bytes_total", "VAP Data MPDU Bytes Tx", labelV, nil),
|
||||
VAPTxRtsRetries: nd(ns+"vap_transmit_rts_retries_total", "VAP RTS Retries Transmitted", labelV, nil),
|
||||
VAPTxSuccess: nd(ns+"vap_transmit_success_total", "VAP Success Transmits", labelV, nil),
|
||||
VAPTxTotal: nd(ns+"vap_transmit_total", "VAP Transmit Total", labelV, nil),
|
||||
VAPTxGoodbytes: nd(ns+"vap_transmit_goodbyes", "VAP Goodbyes Transmitted", labelV, nil),
|
||||
VAPTxLatAvg: nd(ns+"vap_transmit_latency_average_seconds", "VAP Latency Average Tx", labelV, nil),
|
||||
VAPTxLatMax: nd(ns+"vap_transmit_latency_maximum_seconds", "VAP Latency Maximum Tx", labelV, nil),
|
||||
VAPTxLatMin: nd(ns+"vap_transmit_latency_minimum_seconds", "VAP Latency Minimum Tx", labelV, nil),
|
||||
VAPRxGoodbytes: nd(ns+"vap_receive_goodbyes", "VAP Goodbyes Received", labelV, nil),
|
||||
VAPRxLatAvg: nd(ns+"vap_receive_latency_average_seconds", "VAP Latency Average Rx", labelV, nil),
|
||||
VAPRxLatMax: nd(ns+"vap_receive_latency_maximum_seconds", "VAP Latency Maximum Rx", labelV, nil),
|
||||
VAPRxLatMin: nd(ns+"vap_receive_latency_minimum_seconds", "VAP Latency Minimum Rx", labelV, nil),
|
||||
VAPWifiTxLatencyMovAvg: nd(ns+"vap_transmit_latency_moving_avg_seconds", "VAP Latency Moving Avg Tx", labelV, nil),
|
||||
VAPWifiTxLatencyMovMax: nd(ns+"vap_transmit_latency_moving_max_seconds", "VAP Latency Moving Min Tx", labelV, nil),
|
||||
VAPWifiTxLatencyMovMin: nd(ns+"vap_transmit_latency_moving_min_seconds", "VAP Latency Moving Max Tx", labelV, nil),
|
||||
VAPWifiTxLatencyMovTotal: nd(ns+"vap_transmit_latency_moving_total", "VAP Latency Moving Total Tramsit", labelV, nil),
|
||||
VAPWifiTxLatencyMovCount: nd(ns+"vap_transmit_latency_moving_count", "VAP Latency Moving Count Tramsit", labelV, nil),
|
||||
// N each - 1 per Radio. 1-4 radios per AP usually
|
||||
RadioCurrentAntennaGain: nd(ns+"radio_current_antenna_gain", "Radio Current Antenna Gain", labelR, nil),
|
||||
RadioHt: nd(ns+"radio_ht", "Radio HT", labelR, nil),
|
||||
RadioMaxTxpower: nd(ns+"radio_max_transmit_power", "Radio Maximum Transmit Power", labelR, nil),
|
||||
RadioMinTxpower: nd(ns+"radio_min_transmit_power", "Radio Minimum Transmit Power", labelR, nil),
|
||||
RadioNss: nd(ns+"radio_nss", "Radio Nss", labelR, nil),
|
||||
RadioRadioCaps: nd(ns+"radio_caps", "Radio Capabilities", labelR, nil),
|
||||
RadioTxPower: nd(ns+"radio_transmit_power", "Radio Transmit Power", labelR, nil),
|
||||
RadioAstBeXmit: nd(ns+"radio_ast_be_xmit", "Radio AstBe Transmit", labelR, nil),
|
||||
RadioChannel: nd(ns+"radio_channel", "Radio Channel", labelR, nil),
|
||||
RadioCuSelfRx: nd(ns+"radio_channel_utilization_receive_ratio", "Channel Utilization Rx", labelR, nil),
|
||||
RadioCuSelfTx: nd(ns+"radio_channel_utilization_transmit_ratio", "Channel Utilization Tx", labelR, nil),
|
||||
RadioCuTotal: nd(ns+"radio_channel_utilization_total_ratio", "Channel Utilization Total", labelR, nil),
|
||||
RadioExtchannel: nd(ns+"radio_ext_channel", "Radio Ext Channel", labelR, nil),
|
||||
RadioGain: nd(ns+"radio_gain", "Radio Gain", labelR, nil),
|
||||
RadioNumSta: nd(ns+"radio_stations", "Radio Total Station Count", append(labelR, "station_type"), nil),
|
||||
RadioTxPackets: nd(ns+"radio_transmit_packets", "Radio Transmitted Packets", labelR, nil),
|
||||
RadioTxRetries: nd(ns+"radio_transmit_retries", "Radio Transmit Retries", labelR, nil),
|
||||
}
|
||||
}
|
||||
|
||||
// exportRogueAP reports one gauge per attribute of a rogue (neighboring,
// unmanaged) access point observed by the controller. Entries with a zero
// Age are skipped so only recently-seen APs are exported.
func (u *promUnifi) exportRogueAP(r report, d *unifi.RogueAP) {
	if d.Age.Val == 0 {
		return // only keep things that are recent.
	}

	// All rogue-AP metrics share this label set (order matters: it must
	// match the label names given when the descriptors were created).
	labels := []string{
		d.Security, d.Oui, d.Band, d.Bssid, d.ApMac, d.Radio, d.RadioName, d.SiteName, d.Essid, d.SourceName,
	}

	r.send([]*metric{
		{u.RogueAP.Age, gauge, d.Age.Val, labels},
		{u.RogueAP.BW, gauge, d.Bw.Val, labels},
		{u.RogueAP.CenterFreq, gauge, d.CenterFreq.Val, labels},
		{u.RogueAP.Channel, gauge, d.Channel, labels},
		{u.RogueAP.Freq, gauge, d.Freq.Val, labels},
		{u.RogueAP.Noise, gauge, d.Noise.Val, labels},
		{u.RogueAP.RSSI, gauge, d.Rssi.Val, labels},
		{u.RogueAP.RSSIAge, gauge, d.RssiAge.Val, labels},
		{u.RogueAP.Signal, gauge, d.Signal.Val, labels},
	})
}
|
||||
|
||||
// exportUAP exports all metrics for one UniFi access point: shared device
// stats, per-VAP and per-port tables, radio tables, and the info/uptime
// gauges. Unadopted devices and devices in "locating" (LED-flash) mode are
// skipped.
func (u *promUnifi) exportUAP(r report, d *unifi.UAP) {
	if !d.Adopted.Val || d.Locating.Val {
		return
	}

	// labels is the common 4-tuple shared by every device metric; the
	// extra infoLabels are appended only onto the Info metric.
	labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
	infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID}
	u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
	u.exportVAPtable(r, labels, d.VapTable)
	u.exportPRTtable(r, labels, d.PortTable)
	u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
	u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
	u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta)
	u.exportRADtable(r, labels, d.RadioTable, d.RadioTableStats)
	r.send([]*metric{
		{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
		{u.Device.Uptime, gauge, d.Uptime, labels},
	})
}
|
||||
|
||||
// exportUAPstats exports the access point "stat" counters, once with the
// "user" label and once with the "guest" label. The variadic bytes argument
// must carry exactly four values, in order: BytesD, TxBytesD, RxBytesD,
// BytesR — they are indexed 0-3 below. Called for UAPs and for UDMs that
// expose AP stats; a nil ap (e.g. a UDM without them) is a no-op.
func (u *promUnifi) exportUAPstats(r report, labels []string, ap *unifi.Ap, bytes ...unifi.FlexInt) {
	if ap == nil {
		return
	}

	// Replace the first label ("stat") with the traffic class; the
	// remaining site/name/source labels are reused as-is.
	labelU := []string{"user", labels[1], labels[2], labels[3]}
	labelG := []string{"guest", labels[1], labels[2], labels[3]}
	r.send([]*metric{
		// ap only stuff.
		{u.Device.BytesD, counter, bytes[0], labels},   // not sure if these 3 Ds are counters or gauges.
		{u.Device.TxBytesD, counter, bytes[1], labels}, // not sure if these 3 Ds are counters or gauges.
		{u.Device.RxBytesD, counter, bytes[2], labels}, // not sure if these 3 Ds are counters or gauges.
		{u.Device.BytesR, gauge, bytes[3], labels},     // only UAP has this one, and those ^ weird.
		// user
		{u.UAP.ApWifiTxDropped, counter, ap.UserWifiTxDropped, labelU},
		{u.UAP.ApRxErrors, counter, ap.UserRxErrors, labelU},
		{u.UAP.ApRxDropped, counter, ap.UserRxDropped, labelU},
		{u.UAP.ApRxFrags, counter, ap.UserRxFrags, labelU},
		{u.UAP.ApRxCrypts, counter, ap.UserRxCrypts, labelU},
		{u.UAP.ApTxPackets, counter, ap.UserTxPackets, labelU},
		{u.UAP.ApTxBytes, counter, ap.UserTxBytes, labelU},
		{u.UAP.ApTxErrors, counter, ap.UserTxErrors, labelU},
		{u.UAP.ApTxDropped, counter, ap.UserTxDropped, labelU},
		{u.UAP.ApTxRetries, counter, ap.UserTxRetries, labelU},
		{u.UAP.ApRxPackets, counter, ap.UserRxPackets, labelU},
		{u.UAP.ApRxBytes, counter, ap.UserRxBytes, labelU},
		{u.UAP.WifiTxAttempts, counter, ap.UserWifiTxAttempts, labelU},
		{u.UAP.MacFilterRejections, counter, ap.UserMacFilterRejections, labelU},
		// guest
		{u.UAP.ApWifiTxDropped, counter, ap.GuestWifiTxDropped, labelG},
		{u.UAP.ApRxErrors, counter, ap.GuestRxErrors, labelG},
		{u.UAP.ApRxDropped, counter, ap.GuestRxDropped, labelG},
		{u.UAP.ApRxFrags, counter, ap.GuestRxFrags, labelG},
		{u.UAP.ApRxCrypts, counter, ap.GuestRxCrypts, labelG},
		{u.UAP.ApTxPackets, counter, ap.GuestTxPackets, labelG},
		{u.UAP.ApTxBytes, counter, ap.GuestTxBytes, labelG},
		{u.UAP.ApTxErrors, counter, ap.GuestTxErrors, labelG},
		{u.UAP.ApTxDropped, counter, ap.GuestTxDropped, labelG},
		{u.UAP.ApTxRetries, counter, ap.GuestTxRetries, labelG},
		{u.UAP.ApRxPackets, counter, ap.GuestRxPackets, labelG},
		{u.UAP.ApRxBytes, counter, ap.GuestRxBytes, labelG},
		{u.UAP.WifiTxAttempts, counter, ap.GuestWifiTxAttempts, labelG},
		{u.UAP.MacFilterRejections, counter, ap.GuestMacFilterRejections, labelG},
	})
}
|
||||
|
||||
// UAP VAP Table. Exports one metric set per virtual AP (one SSID on one
// radio) that is currently up; down VAPs are skipped entirely.
func (u *promUnifi) exportVAPtable(r report, labels []string, vt unifi.VapTable) {
	// vap table stats
	for _, v := range vt {
		if !v.Up.Val {
			continue
		}

		// Per-VAP labels followed by the shared site/name/source labels.
		labelV := []string{v.Name, v.Bssid, v.Radio, v.RadioName, v.Essid, v.Usage, labels[1], labels[2], labels[3]}
		// Divisors below: Ccq/1000 and Satisfaction/100 normalize to 0-1
		// ratios; latency values are divided by 1000 to match the
		// *_seconds metric names (assumes the source unit is ms — TODO
		// confirm against the controller API).
		r.send([]*metric{
			{u.UAP.VAPCcq, gauge, float64(v.Ccq) / 1000.0, labelV},
			{u.UAP.VAPMacFilterRejections, counter, v.MacFilterRejections, labelV},
			{u.UAP.VAPNumSatisfactionSta, gauge, v.NumSatisfactionSta, labelV},
			{u.UAP.VAPAvgClientSignal, gauge, v.AvgClientSignal.Val, labelV},
			{u.UAP.VAPSatisfaction, gauge, v.Satisfaction.Val / 100.0, labelV},
			{u.UAP.VAPSatisfactionNow, gauge, v.SatisfactionNow.Val / 100.0, labelV},
			{u.UAP.VAPDNSAvgLatency, gauge, v.DNSAvgLatency.Val / 1000, labelV},
			{u.UAP.VAPRxBytes, counter, v.RxBytes, labelV},
			{u.UAP.VAPRxCrypts, counter, v.RxCrypts, labelV},
			{u.UAP.VAPRxDropped, counter, v.RxDropped, labelV},
			{u.UAP.VAPRxErrors, counter, v.RxErrors, labelV},
			{u.UAP.VAPRxFrags, counter, v.RxFrags, labelV},
			{u.UAP.VAPRxNwids, counter, v.RxNwids, labelV},
			{u.UAP.VAPRxPackets, counter, v.RxPackets, labelV},
			{u.UAP.VAPTxBytes, counter, v.TxBytes, labelV},
			{u.UAP.VAPTxDropped, counter, v.TxDropped, labelV},
			{u.UAP.VAPTxErrors, counter, v.TxErrors, labelV},
			{u.UAP.VAPTxPackets, counter, v.TxPackets, labelV},
			{u.UAP.VAPTxPower, gauge, v.TxPower, labelV},
			{u.UAP.VAPTxRetries, counter, v.TxRetries, labelV},
			{u.UAP.VAPTxCombinedRetries, counter, v.TxCombinedRetries, labelV},
			{u.UAP.VAPTxDataMpduBytes, counter, v.TxDataMpduBytes, labelV},
			{u.UAP.VAPTxRtsRetries, counter, v.TxRtsRetries, labelV},
			{u.UAP.VAPTxTotal, counter, v.TxTotal, labelV},
			{u.UAP.VAPTxGoodbytes, counter, v.TxTCPStats.Goodbytes, labelV},
			{u.UAP.VAPTxLatAvg, gauge, v.TxTCPStats.LatAvg.Val / 1000, labelV},
			{u.UAP.VAPTxLatMax, gauge, v.TxTCPStats.LatMax.Val / 1000, labelV},
			{u.UAP.VAPTxLatMin, gauge, v.TxTCPStats.LatMin.Val / 1000, labelV},
			{u.UAP.VAPRxGoodbytes, counter, v.RxTCPStats.Goodbytes, labelV},
			{u.UAP.VAPRxLatAvg, gauge, v.RxTCPStats.LatAvg.Val / 1000, labelV},
			{u.UAP.VAPRxLatMax, gauge, v.RxTCPStats.LatMax.Val / 1000, labelV},
			{u.UAP.VAPRxLatMin, gauge, v.RxTCPStats.LatMin.Val / 1000, labelV},
			{u.UAP.VAPWifiTxLatencyMovAvg, gauge, v.WifiTxLatencyMov.Avg.Val / 1000, labelV},
			{u.UAP.VAPWifiTxLatencyMovMax, gauge, v.WifiTxLatencyMov.Max.Val / 1000, labelV},
			{u.UAP.VAPWifiTxLatencyMovMin, gauge, v.WifiTxLatencyMov.Min.Val / 1000, labelV},
			{u.UAP.VAPWifiTxLatencyMovTotal, counter, v.WifiTxLatencyMov.Total, labelV},      // not sure if gauge or counter.
			{u.UAP.VAPWifiTxLatencyMovCount, counter, v.WifiTxLatencyMov.TotalCount, labelV}, // not sure if gauge or counter.
		})
	}
}
|
||||
|
||||
// UAP Radio Table.
|
||||
func (u *promUnifi) exportRADtable(r report, labels []string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
|
||||
// radio table
|
||||
for _, p := range rt {
|
||||
labelR := []string{p.Name, p.Radio, labels[1], labels[2], labels[3]}
|
||||
labelRUser := append(labelR, "user")
|
||||
labelRGuest := append(labelR, "guest")
|
||||
|
||||
r.send([]*metric{
|
||||
{u.UAP.RadioCurrentAntennaGain, gauge, p.CurrentAntennaGain, labelR},
|
||||
{u.UAP.RadioHt, gauge, p.Ht, labelR},
|
||||
{u.UAP.RadioMaxTxpower, gauge, p.MaxTxpower, labelR},
|
||||
{u.UAP.RadioMinTxpower, gauge, p.MinTxpower, labelR},
|
||||
{u.UAP.RadioNss, gauge, p.Nss, labelR},
|
||||
{u.UAP.RadioRadioCaps, gauge, p.RadioCaps, labelR},
|
||||
})
|
||||
|
||||
// combine radio table with radio stats table.
|
||||
for _, t := range rts {
|
||||
if t.Name != p.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
r.send([]*metric{
|
||||
{u.UAP.RadioTxPower, gauge, t.TxPower, labelR},
|
||||
{u.UAP.RadioAstBeXmit, gauge, t.AstBeXmit, labelR},
|
||||
{u.UAP.RadioChannel, gauge, t.Channel, labelR},
|
||||
{u.UAP.RadioCuSelfRx, gauge, t.CuSelfRx.Val / 100.0, labelR},
|
||||
{u.UAP.RadioCuSelfTx, gauge, t.CuSelfTx.Val / 100.0, labelR},
|
||||
{u.UAP.RadioCuTotal, gauge, t.CuTotal.Val / 100.0, labelR},
|
||||
{u.UAP.RadioExtchannel, gauge, t.Extchannel, labelR},
|
||||
{u.UAP.RadioGain, gauge, t.Gain, labelR},
|
||||
{u.UAP.RadioNumSta, gauge, t.GuestNumSta, labelRGuest},
|
||||
{u.UAP.RadioNumSta, gauge, t.UserNumSta, labelRUser},
|
||||
{u.UAP.RadioTxPackets, gauge, t.TxPackets, labelR},
|
||||
{u.UAP.RadioTxRetries, gauge, t.TxRetries, labelR},
|
||||
})
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,148 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// These are shared by all four device types: UDM, UAP, USG, USW.
// unifiDevice holds the Prometheus descriptors for metrics every device
// reports: identity/uptime, temperatures, storage, byte counters, station
// counts, and system (load/memory/CPU) stats. Fields marked "sw only" or
// "ap only" are populated only for that device class.
type unifiDevice struct {
	Info          *prometheus.Desc
	Uptime        *prometheus.Desc
	Temperature   *prometheus.Desc
	Storage       *prometheus.Desc
	TotalMaxPower *prometheus.Desc // sw only
	FanLevel      *prometheus.Desc // sw only
	TotalTxBytes  *prometheus.Desc
	TotalRxBytes  *prometheus.Desc
	TotalBytes    *prometheus.Desc
	BytesR        *prometheus.Desc // ap only
	BytesD        *prometheus.Desc // ap only
	TxBytesD      *prometheus.Desc // ap only
	RxBytesD      *prometheus.Desc // ap only
	Counter       *prometheus.Desc
	Loadavg1      *prometheus.Desc
	Loadavg5      *prometheus.Desc
	Loadavg15     *prometheus.Desc
	MemBuffer     *prometheus.Desc
	MemTotal      *prometheus.Desc
	MemUsed       *prometheus.Desc
	CPU           *prometheus.Desc
	Mem           *prometheus.Desc
}
|
||||
|
||||
// descDevice returns the descriptors for the metrics shared by every UniFi
// device type. ns is the metric-name prefix. All metrics use the common
// type/site_name/name/source labels; Info, Temperature, Storage, and the
// station Counter append extra labels of their own.
func descDevice(ns string) *unifiDevice {
	labels := []string{"type", "site_name", "name", "source"}
	infoLabels := []string{"version", "model", "serial", "mac", "ip", "id"}

	return &unifiDevice{
		Info:   prometheus.NewDesc(ns+"info", "Device Information", append(labels, infoLabels...), nil),
		Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Device Uptime", labels, nil),
		Temperature: prometheus.NewDesc(ns+"temperature_celsius", "Temperature",
			append(labels, "temp_area", "temp_type"), nil),
		Storage: prometheus.NewDesc(ns+"storage", "Storage",
			append(labels, "mountpoint", "storage_name", "storage_reading"), nil),
		TotalMaxPower: prometheus.NewDesc(ns+"max_power_total", "Total Max Power", labels, nil),
		FanLevel:      prometheus.NewDesc(ns+"fan_level", "Fan Level", labels, nil),
		TotalTxBytes:  prometheus.NewDesc(ns+"transmit_bytes_total", "Total Transmitted Bytes", labels, nil),
		TotalRxBytes:  prometheus.NewDesc(ns+"receive_bytes_total", "Total Received Bytes", labels, nil),
		TotalBytes:    prometheus.NewDesc(ns+"bytes_total", "Total Bytes Transferred", labels, nil),
		BytesR:        prometheus.NewDesc(ns+"rate_bytes", "Transfer Rate", labels, nil),
		BytesD:        prometheus.NewDesc(ns+"d_bytes", "Total Bytes D???", labels, nil),
		// NOTE(review): "tranmsit" below is a typo in the exported metric
		// name; renaming it would break existing dashboards, so it stays.
		TxBytesD:  prometheus.NewDesc(ns+"d_tranmsit_bytes", "Transmit Bytes D???", labels, nil),
		RxBytesD:  prometheus.NewDesc(ns+"d_receive_bytes", "Receive Bytes D???", labels, nil),
		Counter:   prometheus.NewDesc(ns+"stations", "Number of Stations", append(labels, "station_type"), nil),
		Loadavg1:  prometheus.NewDesc(ns+"load_average_1", "System Load Average 1 Minute", labels, nil),
		Loadavg5:  prometheus.NewDesc(ns+"load_average_5", "System Load Average 5 Minutes", labels, nil),
		Loadavg15: prometheus.NewDesc(ns+"load_average_15", "System Load Average 15 Minutes", labels, nil),
		MemUsed:   prometheus.NewDesc(ns+"memory_used_bytes", "System Memory Used", labels, nil),
		MemTotal:  prometheus.NewDesc(ns+"memory_installed_bytes", "System Installed Memory", labels, nil),
		MemBuffer: prometheus.NewDesc(ns+"memory_buffer_bytes", "System Memory Buffer", labels, nil),
		CPU:       prometheus.NewDesc(ns+"cpu_utilization_ratio", "System CPU % Utilized", labels, nil),
		Mem:       prometheus.NewDesc(ns+"memory_utilization_ratio", "System Memory % Utilized", labels, nil),
	}
}
|
||||
|
||||
// UDM is a collection of stats from USG, USW and UAP. It has no unique stats.
|
||||
func (u *promUnifi) exportUDM(r report, d *unifi.UDM) {
|
||||
if !d.Adopted.Val || d.Locating.Val {
|
||||
return
|
||||
}
|
||||
|
||||
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
|
||||
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID}
|
||||
// Shared data (all devices do this).
|
||||
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
|
||||
u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
|
||||
u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta, d.NumDesktop, d.NumMobile, d.NumHandheld)
|
||||
// Switch Data
|
||||
u.exportUSWstats(r, labels, d.Stat.Sw)
|
||||
u.exportPRTtable(r, labels, d.PortTable)
|
||||
// Gateway Data
|
||||
u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
|
||||
u.exportUSGstats(r, labels, d.Stat.Gw, d.SpeedtestStatus, d.Uplink)
|
||||
// Dream Machine System Data.
|
||||
r.send([]*metric{
|
||||
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
|
||||
{u.Device.Uptime, gauge, d.Uptime, labels},
|
||||
})
|
||||
|
||||
// UDM pro has special temp sensors. UDM non-pro may not have temp; not sure.
|
||||
for _, t := range d.Temperatures {
|
||||
r.send([]*metric{{u.Device.Temperature, gauge, t.Value, append(labels, t.Name, t.Type)}})
|
||||
}
|
||||
|
||||
// UDM pro and UXG have hard drives.
|
||||
for _, t := range d.Storage {
|
||||
r.send([]*metric{
|
||||
{u.Device.Storage, gauge, t.Size.Val, append(labels, t.MountPoint, t.Name, "size")},
|
||||
{u.Device.Storage, gauge, t.Used.Val, append(labels, t.MountPoint, t.Name, "used")},
|
||||
})
|
||||
}
|
||||
|
||||
// Wireless Data - UDM (non-pro) only
|
||||
if d.Stat.Ap != nil && d.VapTable != nil {
|
||||
u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
|
||||
u.exportVAPtable(r, labels, *d.VapTable)
|
||||
u.exportRADtable(r, labels, *d.RadioTable, *d.RadioTableStats)
|
||||
}
|
||||
}
|
||||
|
||||
// Shared by all.
|
||||
func (u *promUnifi) exportBYTstats(r report, labels []string, tx, rx unifi.FlexInt) {
|
||||
r.send([]*metric{
|
||||
{u.Device.TotalTxBytes, counter, tx, labels},
|
||||
{u.Device.TotalRxBytes, counter, rx, labels},
|
||||
{u.Device.TotalBytes, counter, tx.Val + rx.Val, labels},
|
||||
})
|
||||
}
|
||||
|
||||
// Shared by all, pass 2 or 5 stats.
|
||||
func (u *promUnifi) exportSTAcount(r report, labels []string, stas ...unifi.FlexInt) {
|
||||
r.send([]*metric{
|
||||
{u.Device.Counter, gauge, stas[0], append(labels, "user")},
|
||||
{u.Device.Counter, gauge, stas[1], append(labels, "guest")},
|
||||
})
|
||||
|
||||
if len(stas) > 2 { // nolint: gomnd
|
||||
r.send([]*metric{
|
||||
{u.Device.Counter, gauge, stas[2], append(labels, "desktop")},
|
||||
{u.Device.Counter, gauge, stas[3], append(labels, "mobile")},
|
||||
{u.Device.Counter, gauge, stas[4], append(labels, "handheld")},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Shared by all device types: exports system load, memory, and CPU stats.
// CPU and Mem arrive as percentages and are divided by 100 to produce the
// 0-1 ratios the *_ratio metric names advertise.
func (u *promUnifi) exportSYSstats(r report, labels []string, s unifi.SysStats, ss unifi.SystemStats) {
	r.send([]*metric{
		{u.Device.Loadavg1, gauge, s.Loadavg1, labels},
		{u.Device.Loadavg5, gauge, s.Loadavg5, labels},
		{u.Device.Loadavg15, gauge, s.Loadavg15, labels},
		{u.Device.MemUsed, gauge, s.MemUsed, labels},
		{u.Device.MemTotal, gauge, s.MemTotal, labels},
		{u.Device.MemBuffer, gauge, s.MemBuffer, labels},
		{u.Device.CPU, gauge, ss.CPU.Val / 100.0, labels},
		{u.Device.Mem, gauge, ss.Mem.Val / 100.0, labels},
	})
}
|
||||
|
|
@ -0,0 +1,164 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// usg holds the Prometheus descriptors for UniFi Security Gateway metrics:
// per-WAN-interface counters, aggregate LAN counters, uplink health, and
// speedtest results.
type usg struct {
	WanRxPackets   *prometheus.Desc
	WanRxBytes     *prometheus.Desc
	WanRxDropped   *prometheus.Desc
	WanRxErrors    *prometheus.Desc
	WanTxPackets   *prometheus.Desc
	WanTxBytes     *prometheus.Desc
	LanRxPackets   *prometheus.Desc
	LanRxBytes     *prometheus.Desc
	LanRxDropped   *prometheus.Desc
	LanTxPackets   *prometheus.Desc
	LanTxBytes     *prometheus.Desc
	WanRxBroadcast *prometheus.Desc
	WanRxBytesR    *prometheus.Desc
	WanRxMulticast *prometheus.Desc
	WanSpeed       *prometheus.Desc
	WanTxBroadcast *prometheus.Desc
	WanTxBytesR    *prometheus.Desc
	WanTxDropped   *prometheus.Desc
	WanTxErrors    *prometheus.Desc
	WanTxMulticast *prometheus.Desc
	WanBytesR      *prometheus.Desc
	Latency        *prometheus.Desc
	UplinkLatency  *prometheus.Desc
	UplinkSpeed    *prometheus.Desc
	Runtime        *prometheus.Desc
	Rundate        *prometheus.Desc
	XputDownload   *prometheus.Desc
	XputUpload     *prometheus.Desc
}
|
||||
|
||||
// descUSG returns the descriptors for UniFi Security Gateway metrics.
// ns is the metric-name prefix. Every metric shares one label set whose
// first label, "port", is filled at export time with the interface name
// (a WAN name, "lan", or "all").
func descUSG(ns string) *usg {
	labels := []string{"port", "site_name", "name", "source"}

	return &usg{
		WanRxPackets:   prometheus.NewDesc(ns+"wan_receive_packets_total", "WAN Receive Packets Total", labels, nil),
		WanRxBytes:     prometheus.NewDesc(ns+"wan_receive_bytes_total", "WAN Receive Bytes Total", labels, nil),
		WanRxDropped:   prometheus.NewDesc(ns+"wan_receive_dropped_total", "WAN Receive Dropped Total", labels, nil),
		WanRxErrors:    prometheus.NewDesc(ns+"wan_receive_errors_total", "WAN Receive Errors Total", labels, nil),
		WanTxPackets:   prometheus.NewDesc(ns+"wan_transmit_packets_total", "WAN Transmit Packets Total", labels, nil),
		WanTxBytes:     prometheus.NewDesc(ns+"wan_transmit_bytes_total", "WAN Transmit Bytes Total", labels, nil),
		WanRxBroadcast: prometheus.NewDesc(ns+"wan_receive_broadcast_total", "WAN Receive Broadcast Total", labels, nil),
		WanRxBytesR:    prometheus.NewDesc(ns+"wan_receive_rate_bytes", "WAN Receive Bytes Rate", labels, nil),
		WanRxMulticast: prometheus.NewDesc(ns+"wan_receive_multicast_total", "WAN Receive Multicast Total", labels, nil),
		WanSpeed:       prometheus.NewDesc(ns+"wan_speed_bps", "WAN Speed", labels, nil),
		WanTxBroadcast: prometheus.NewDesc(ns+"wan_transmit_broadcast_total", "WAN Transmit Broadcast Total", labels, nil),
		WanTxBytesR:    prometheus.NewDesc(ns+"wan_transmit_rate_bytes", "WAN Transmit Bytes Rate", labels, nil),
		WanTxDropped:   prometheus.NewDesc(ns+"wan_transmit_dropped_total", "WAN Transmit Dropped Total", labels, nil),
		WanTxErrors:    prometheus.NewDesc(ns+"wan_transmit_errors_total", "WAN Transmit Errors Total", labels, nil),
		WanTxMulticast: prometheus.NewDesc(ns+"wan_transmit_multicast_total", "WAN Transmit Multicast Total", labels, nil),
		WanBytesR:      prometheus.NewDesc(ns+"wan_rate_bytes", "WAN Transfer Rate", labels, nil),
		LanRxPackets:   prometheus.NewDesc(ns+"lan_receive_packets_total", "LAN Receive Packets Total", labels, nil),
		LanRxBytes:     prometheus.NewDesc(ns+"lan_receive_bytes_total", "LAN Receive Bytes Total", labels, nil),
		LanRxDropped:   prometheus.NewDesc(ns+"lan_receive_dropped_total", "LAN Receive Dropped Total", labels, nil),
		LanTxPackets:   prometheus.NewDesc(ns+"lan_transmit_packets_total", "LAN Transmit Packets Total", labels, nil),
		LanTxBytes:     prometheus.NewDesc(ns+"lan_transmit_bytes_total", "LAN Transmit Bytes Total", labels, nil),
		UplinkLatency:  prometheus.NewDesc(ns+"uplink_latency_seconds", "Uplink Latency", labels, nil),
		UplinkSpeed:    prometheus.NewDesc(ns+"uplink_speed_mbps", "Uplink Speed", labels, nil),
		Latency:        prometheus.NewDesc(ns+"speedtest_latency_seconds", "Speedtest Latency", labels, nil),
		Runtime:        prometheus.NewDesc(ns+"speedtest_runtime_seconds", "Speedtest Run Time", labels, nil),
		Rundate:        prometheus.NewDesc(ns+"speedtest_rundate_seconds", "Speedtest Run Date", labels, nil),
		XputDownload:   prometheus.NewDesc(ns+"speedtest_download", "Speedtest Download Rate", labels, nil),
		XputUpload:     prometheus.NewDesc(ns+"speedtest_upload", "Speedtest Upload Rate", labels, nil),
	}
}
|
||||
|
||||
// exportUSG exports all metrics for a UniFi Security Gateway: temperature
// sensors, WAN/LAN/gateway stats, uplink and speedtest data, station counts,
// and the info/uptime gauges. Unadopted or locating devices are skipped.
func (u *promUnifi) exportUSG(r report, d *unifi.USG) {
	if !d.Adopted.Val || d.Locating.Val {
		return
	}

	labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
	infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID}

	for _, t := range d.Temperatures {
		r.send([]*metric{{u.Device.Temperature, gauge, t.Value, append(labels, t.Name, t.Type)}})
	}

	// SystemStats temps are strings; the leading token is parsed as the
	// integer reading, and the key is normalized (spaces -> underscores,
	// parens stripped, lowercased) to serve as both temp labels.
	for k, v := range d.SystemStats.Temps {
		temp, _ := strconv.ParseInt(strings.Split(v, " ")[0], 10, 64)
		k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")

		// Skip unparsable (zero) readings and empty keys.
		if k = strings.ToLower(k); temp != 0 && k != "" {
			r.send([]*metric{{u.Device.Temperature, gauge, temp, append(labels, k, k)}})
		}
	}

	// Gateway System Data.
	u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
	u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
	u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
	u.exportUSGstats(r, labels, d.Stat.Gw, d.SpeedtestStatus, d.Uplink)
	// NOTE(review): the "mobile" and "handheld" slots below repeat
	// UserNumSta/GuestNumSta (the UDM passes NumMobile/NumHandheld here) —
	// confirm whether the USG type exposes those fields.
	u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta, d.NumDesktop, d.UserNumSta, d.GuestNumSta)
	r.send([]*metric{
		{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
		{u.Device.Uptime, gauge, d.Uptime, labels},
	})
}
|
||||
|
||||
// Gateway Stats. Exports the aggregate LAN counters from the gateway stat
// table plus uplink and speedtest metrics. A nil gw (stats not available)
// is a no-op. Latency values are divided by 1000 to match the *_seconds
// metric names (assumes the source unit is ms — TODO confirm).
func (u *promUnifi) exportUSGstats(r report, labels []string, gw *unifi.Gw, st unifi.SpeedtestStatus, ul unifi.Uplink) {
	if gw == nil {
		return
	}

	// The first label slot ("port") becomes "lan" for LAN counters and
	// "all" for uplink/speedtest metrics.
	labelLan := []string{"lan", labels[1], labels[2], labels[3]}
	labelWan := []string{"all", labels[1], labels[2], labels[3]}

	r.send([]*metric{
		{u.USG.LanRxPackets, counter, gw.LanRxPackets, labelLan},
		{u.USG.LanRxBytes, counter, gw.LanRxBytes, labelLan},
		{u.USG.LanTxPackets, counter, gw.LanTxPackets, labelLan},
		{u.USG.LanTxBytes, counter, gw.LanTxBytes, labelLan},
		{u.USG.LanRxDropped, counter, gw.LanRxDropped, labelLan},
		{u.USG.UplinkLatency, gauge, ul.Latency.Val / 1000, labelWan},
		{u.USG.UplinkSpeed, gauge, ul.Speed, labelWan},
		// Speed Test Stats
		{u.USG.Latency, gauge, st.Latency.Val / 1000, labelWan},
		{u.USG.Runtime, gauge, st.Runtime, labelWan},
		{u.USG.Rundate, gauge, st.Rundate, labelWan},
		{u.USG.XputDownload, gauge, st.XputDownload, labelWan},
		{u.USG.XputUpload, gauge, st.XputUpload, labelWan},
	})
}
|
||||
|
||||
// WAN Stats.
|
||||
func (u *promUnifi) exportWANPorts(r report, labels []string, wans ...unifi.Wan) {
|
||||
for _, wan := range wans {
|
||||
if !wan.Up.Val {
|
||||
continue // only record UP interfaces.
|
||||
}
|
||||
|
||||
labelWan := []string{wan.Name, labels[1], labels[2], labels[3]}
|
||||
|
||||
r.send([]*metric{
|
||||
{u.USG.WanRxPackets, counter, wan.RxPackets, labelWan},
|
||||
{u.USG.WanRxBytes, counter, wan.RxBytes, labelWan},
|
||||
{u.USG.WanRxDropped, counter, wan.RxDropped, labelWan},
|
||||
{u.USG.WanRxErrors, counter, wan.RxErrors, labelWan},
|
||||
{u.USG.WanTxPackets, counter, wan.TxPackets, labelWan},
|
||||
{u.USG.WanTxBytes, counter, wan.TxBytes, labelWan},
|
||||
{u.USG.WanRxBroadcast, counter, wan.RxBroadcast, labelWan},
|
||||
{u.USG.WanRxMulticast, counter, wan.RxMulticast, labelWan},
|
||||
{u.USG.WanSpeed, gauge, wan.Speed.Val * 1000000, labelWan},
|
||||
{u.USG.WanTxBroadcast, counter, wan.TxBroadcast, labelWan},
|
||||
{u.USG.WanTxBytesR, gauge, wan.TxBytesR, labelWan},
|
||||
{u.USG.WanRxBytesR, gauge, wan.RxBytesR, labelWan},
|
||||
{u.USG.WanTxDropped, counter, wan.TxDropped, labelWan},
|
||||
{u.USG.WanTxErrors, counter, wan.TxErrors, labelWan},
|
||||
{u.USG.WanTxMulticast, counter, wan.TxMulticast, labelWan},
|
||||
{u.USG.WanBytesR, gauge, wan.BytesR, labelWan},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,226 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// usw holds the Prometheus descriptors for UniFi switch metrics: aggregate
// switch-wide counters, per-port counters (including PoE readings), and
// per-port SFP module diagnostics.
type usw struct {
	// Switch "total" traffic stats
	SwRxPackets   *prometheus.Desc
	SwRxBytes     *prometheus.Desc
	SwRxErrors    *prometheus.Desc
	SwRxDropped   *prometheus.Desc
	SwRxCrypts    *prometheus.Desc
	SwRxFrags     *prometheus.Desc
	SwTxPackets   *prometheus.Desc
	SwTxBytes     *prometheus.Desc
	SwTxErrors    *prometheus.Desc
	SwTxDropped   *prometheus.Desc
	SwTxRetries   *prometheus.Desc
	SwRxMulticast *prometheus.Desc
	SwRxBroadcast *prometheus.Desc
	SwTxMulticast *prometheus.Desc
	SwTxBroadcast *prometheus.Desc
	SwBytes       *prometheus.Desc
	// Port data.
	PoeCurrent     *prometheus.Desc
	PoePower       *prometheus.Desc
	PoeVoltage     *prometheus.Desc
	RxBroadcast    *prometheus.Desc
	RxBytes        *prometheus.Desc
	RxBytesR       *prometheus.Desc
	RxDropped      *prometheus.Desc
	RxErrors       *prometheus.Desc
	RxMulticast    *prometheus.Desc
	RxPackets      *prometheus.Desc
	Satisfaction   *prometheus.Desc
	Speed          *prometheus.Desc
	TxBroadcast    *prometheus.Desc
	TxBytes        *prometheus.Desc
	TxBytesR       *prometheus.Desc
	TxDropped      *prometheus.Desc
	TxErrors       *prometheus.Desc
	TxMulticast    *prometheus.Desc
	TxPackets      *prometheus.Desc
	SFPCurrent     *prometheus.Desc
	SFPRxPower     *prometheus.Desc
	SFPTemperature *prometheus.Desc
	SFPTxPower     *prometheus.Desc
	SFPVoltage     *prometheus.Desc
}
|
||||
|
||||
func descUSW(ns string) *usw {
|
||||
pns := ns + "port_"
|
||||
sfp := pns + "sfp_"
|
||||
labelS := []string{"site_name", "name", "source"}
|
||||
labelP := []string{"port_id", "port_num", "port_name", "port_mac", "port_ip", "site_name", "name", "source"}
|
||||
labelF := []string{
|
||||
"sfp_part", "sfp_vendor", "sfp_serial", "sfp_compliance",
|
||||
"port_id", "port_num", "port_name", "port_mac", "port_ip", "site_name", "name", "source",
|
||||
}
|
||||
nd := prometheus.NewDesc
|
||||
|
||||
return &usw{
|
||||
// This data may be derivable by sum()ing the port data.
|
||||
SwRxPackets: nd(ns+"switch_receive_packets_total", "Switch Packets Received Total", labelS, nil),
|
||||
SwRxBytes: nd(ns+"switch_receive_bytes_total", "Switch Bytes Received Total", labelS, nil),
|
||||
SwRxErrors: nd(ns+"switch_receive_errors_total", "Switch Errors Received Total", labelS, nil),
|
||||
SwRxDropped: nd(ns+"switch_receive_dropped_total", "Switch Dropped Received Total", labelS, nil),
|
||||
SwRxCrypts: nd(ns+"switch_receive_crypts_total", "Switch Crypts Received Total", labelS, nil),
|
||||
SwRxFrags: nd(ns+"switch_receive_frags_total", "Switch Frags Received Total", labelS, nil),
|
||||
SwTxPackets: nd(ns+"switch_transmit_packets_total", "Switch Packets Transmit Total", labelS, nil),
|
||||
SwTxBytes: nd(ns+"switch_transmit_bytes_total", "Switch Bytes Transmit Total", labelS, nil),
|
||||
SwTxErrors: nd(ns+"switch_transmit_errors_total", "Switch Errors Transmit Total", labelS, nil),
|
||||
SwTxDropped: nd(ns+"switch_transmit_dropped_total", "Switch Dropped Transmit Total", labelS, nil),
|
||||
SwTxRetries: nd(ns+"switch_transmit_retries_total", "Switch Retries Transmit Total", labelS, nil),
|
||||
SwRxMulticast: nd(ns+"switch_receive_multicast_total", "Switch Multicast Receive Total", labelS, nil),
|
||||
SwRxBroadcast: nd(ns+"switch_receive_broadcast_total", "Switch Broadcast Receive Total", labelS, nil),
|
||||
SwTxMulticast: nd(ns+"switch_transmit_multicast_total", "Switch Multicast Transmit Total", labelS, nil),
|
||||
SwTxBroadcast: nd(ns+"switch_transmit_broadcast_total", "Switch Broadcast Transmit Total", labelS, nil),
|
||||
SwBytes: nd(ns+"switch_bytes_total", "Switch Bytes Transferred Total", labelS, nil),
|
||||
// per-port data
|
||||
PoeCurrent: nd(pns+"poe_amperes", "POE Current", labelP, nil),
|
||||
PoePower: nd(pns+"poe_watts", "POE Power", labelP, nil),
|
||||
PoeVoltage: nd(pns+"poe_volts", "POE Voltage", labelP, nil),
|
||||
RxBroadcast: nd(pns+"receive_broadcast_total", "Receive Broadcast", labelP, nil),
|
||||
RxBytes: nd(pns+"receive_bytes_total", "Total Receive Bytes", labelP, nil),
|
||||
RxBytesR: nd(pns+"receive_rate_bytes", "Receive Bytes Rate", labelP, nil),
|
||||
RxDropped: nd(pns+"receive_dropped_total", "Total Receive Dropped", labelP, nil),
|
||||
RxErrors: nd(pns+"receive_errors_total", "Total Receive Errors", labelP, nil),
|
||||
RxMulticast: nd(pns+"receive_multicast_total", "Total Receive Multicast", labelP, nil),
|
||||
RxPackets: nd(pns+"receive_packets_total", "Total Receive Packets", labelP, nil),
|
||||
Satisfaction: nd(pns+"satisfaction_ratio", "Satisfaction", labelP, nil),
|
||||
Speed: nd(pns+"port_speed_bps", "Speed", labelP, nil),
|
||||
TxBroadcast: nd(pns+"transmit_broadcast_total", "Total Transmit Broadcast", labelP, nil),
|
||||
TxBytes: nd(pns+"transmit_bytes_total", "Total Transmit Bytes", labelP, nil),
|
||||
TxBytesR: nd(pns+"transmit_rate_bytes", "Transmit Bytes Rate", labelP, nil),
|
||||
TxDropped: nd(pns+"transmit_dropped_total", "Total Transmit Dropped", labelP, nil),
|
||||
TxErrors: nd(pns+"transmit_errors_total", "Total Transmit Errors", labelP, nil),
|
||||
TxMulticast: nd(pns+"transmit_multicast_total", "Total Tranmist Multicast", labelP, nil),
|
||||
TxPackets: nd(pns+"transmit_packets_total", "Total Transmit Packets", labelP, nil),
|
||||
SFPCurrent: nd(sfp+"current", "SFP Current", labelF, nil),
|
||||
SFPRxPower: nd(sfp+"rx_power", "SFP Receive Power", labelF, nil),
|
||||
SFPTemperature: nd(sfp+"temperature", "SFP Temperature", labelF, nil),
|
||||
SFPTxPower: nd(sfp+"tx_power", "SFP Transmit Power", labelF, nil),
|
||||
SFPVoltage: nd(sfp+"voltage", "SFP Voltage", labelF, nil),
|
||||
}
|
||||
}
|
||||
|
||||
// exportUSW exports all metrics for one UniFi switch. Devices that are
// not adopted, or are currently flashing their locate LED, are skipped.
func (u *promUnifi) exportUSW(r report, d *unifi.USW) {
	if !d.Adopted.Val || d.Locating.Val {
		return
	}

	// Label order matters downstream: [type, site_name, name, source].
	labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
	infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID}

	// Switch-specific tables, then the exporters shared by all device types.
	u.exportUSWstats(r, labels, d.Stat.Sw)
	u.exportPRTtable(r, labels, d.PortTable)
	u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
	u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
	u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta)
	r.send([]*metric{
		{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
		{u.Device.Uptime, gauge, d.Uptime, labels},
	})

	// Switch System Data.
	// Temperature, fan, and PoE budget exist only on some models, so each
	// is guarded by its own capability flag.
	if d.HasTemperature.Val {
		r.send([]*metric{{u.Device.Temperature, gauge, d.GeneralTemperature, append(labels, "general", "board")}})
	}

	if d.HasFan.Val {
		r.send([]*metric{{u.Device.FanLevel, gauge, d.FanLevel, labels}})
	}

	if d.TotalMaxPower.Txt != "" {
		r.send([]*metric{{u.Device.TotalMaxPower, gauge, d.TotalMaxPower, labels}})
	}
}
|
||||
|
||||
// Switch Stats.
// exportUSWstats sends the switch-wide traffic totals. labels is
// [type, site_name, name, source]; the leading type label is dropped to
// match the labelS set used by descUSW.
func (u *promUnifi) exportUSWstats(r report, labels []string, sw *unifi.Sw) {
	if sw == nil {
		return
	}

	labelS := labels[1:]

	r.send([]*metric{
		{u.USW.SwRxPackets, counter, sw.RxPackets, labelS},
		{u.USW.SwRxBytes, counter, sw.RxBytes, labelS},
		{u.USW.SwRxErrors, counter, sw.RxErrors, labelS},
		{u.USW.SwRxDropped, counter, sw.RxDropped, labelS},
		{u.USW.SwRxCrypts, counter, sw.RxCrypts, labelS},
		{u.USW.SwRxFrags, counter, sw.RxFrags, labelS},
		{u.USW.SwTxPackets, counter, sw.TxPackets, labelS},
		{u.USW.SwTxBytes, counter, sw.TxBytes, labelS},
		{u.USW.SwTxErrors, counter, sw.TxErrors, labelS},
		{u.USW.SwTxDropped, counter, sw.TxDropped, labelS},
		{u.USW.SwTxRetries, counter, sw.TxRetries, labelS},
		{u.USW.SwRxMulticast, counter, sw.RxMulticast, labelS},
		{u.USW.SwRxBroadcast, counter, sw.RxBroadcast, labelS},
		{u.USW.SwTxMulticast, counter, sw.TxMulticast, labelS},
		{u.USW.SwTxBroadcast, counter, sw.TxBroadcast, labelS},
		{u.USW.SwBytes, counter, sw.Bytes, labelS},
	})
}
|
||||
|
||||
// Switch Port Table.
// exportPRTtable sends per-port metrics for a switch (or gateway) port
// table. labels is [type, site_name, name, source].
func (u *promUnifi) exportPRTtable(r report, labels []string, pt []unifi.Port) {
	// Per-port data on a switch
	for _, p := range pt {
		// Skip down or disabled ports unless the user opted into them.
		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
			continue
		}

		// Copy labels, and add four new ones.
		// labelP is [port_id, port_num, port_name, port_mac, port_ip,
		// site_name, name, source], matching descUSW's labelP set.
		labelP := []string{
			labels[2] + " Port " + p.PortIdx.Txt, p.PortIdx.Txt,
			p.Name, p.Mac, p.IP, labels[1], labels[2], labels[3],
		}

		// PoE metrics only when PoE is enabled on a PoE-capable port.
		if p.PoeEnable.Val && p.PortPoe.Val {
			r.send([]*metric{
				{u.USW.PoeCurrent, gauge, p.PoeCurrent, labelP},
				{u.USW.PoePower, gauge, p.PoePower, labelP},
				{u.USW.PoeVoltage, gauge, p.PoeVoltage, labelP},
			})
		}

		// SFP module diagnostics, only when a module is present.
		if p.SFPFound.Val {
			labelF := []string{
				p.SFPPart, p.SFPVendor, p.SFPSerial, p.SFPCompliance,
				labelP[0], labelP[1], labelP[2], labelP[3], labelP[4], labelP[5], labelP[6], labelP[7],
			}

			r.send([]*metric{
				{u.USW.SFPCurrent, gauge, p.SFPCurrent.Val, labelF},
				{u.USW.SFPVoltage, gauge, p.SFPVoltage.Val, labelF},
				{u.USW.SFPTemperature, gauge, p.SFPTemperature.Val, labelF},
				{u.USW.SFPRxPower, gauge, p.SFPRxpower.Val, labelF},
				{u.USW.SFPTxPower, gauge, p.SFPTxpower.Val, labelF},
			})
		}

		r.send([]*metric{
			{u.USW.RxBroadcast, counter, p.RxBroadcast, labelP},
			{u.USW.RxBytes, counter, p.RxBytes, labelP},
			{u.USW.RxBytesR, gauge, p.RxBytesR, labelP},
			{u.USW.RxDropped, counter, p.RxDropped, labelP},
			{u.USW.RxErrors, counter, p.RxErrors, labelP},
			{u.USW.RxMulticast, counter, p.RxMulticast, labelP},
			{u.USW.RxPackets, counter, p.RxPackets, labelP},
			// Percent scaled to a 0-1 ratio for the _ratio metric.
			{u.USW.Satisfaction, gauge, p.Satisfaction.Val / 100.0, labelP},
			// Presumably Mbps scaled to bps -- TODO confirm against unifi lib.
			{u.USW.Speed, gauge, p.Speed.Val * 1000000, labelP},
			{u.USW.TxBroadcast, counter, p.TxBroadcast, labelP},
			{u.USW.TxBytes, counter, p.TxBytes, labelP},
			{u.USW.TxBytesR, gauge, p.TxBytesR, labelP},
			{u.USW.TxDropped, counter, p.TxDropped, labelP},
			{u.USW.TxErrors, counter, p.TxErrors, labelP},
			{u.USW.TxMulticast, counter, p.TxMulticast, labelP},
			{u.USW.TxPackets, counter, p.TxPackets, labelP},
		})
	}
}
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
package promunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// exportUXG is a collection of stats from USG and USW. It has no unique stats.
// Unadopted or locating devices are skipped.
func (u *promUnifi) exportUXG(r report, d *unifi.UXG) {
	if !d.Adopted.Val || d.Locating.Val {
		return
	}

	// Label order matters downstream: [type, site_name, name, source].
	labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
	infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID}
	// Shared data (all devices do this).
	u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
	u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
	u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta, d.NumDesktop, d.NumMobile, d.NumHandheld)
	// Switch Data
	u.exportUSWstats(r, labels, d.Stat.Sw)
	u.exportPRTtable(r, labels, d.PortTable)
	// Gateway Data
	u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
	u.exportUSGstats(r, labels, d.Stat.Gw, d.SpeedtestStatus, d.Uplink)
	// Dream Machine System Data.
	r.send([]*metric{
		{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
		{u.Device.Uptime, gauge, d.Uptime, labels},
	})

	// One temperature metric per sensor, labeled by sensor name and type.
	for _, t := range d.Temperatures {
		r.send([]*metric{{u.Device.Temperature, gauge, t.Value, append(labels, t.Name, t.Type)}})
	}

	// UDM pro and UXG have hard drives.
	// Size and used bytes share one description, split by the last label.
	for _, t := range d.Storage {
		r.send([]*metric{
			{u.Device.Storage, gauge, t.Size.Val, append(labels, t.MountPoint, t.Name, "size")},
			{u.Device.Storage, gauge, t.Used.Val, append(labels, t.MountPoint, t.Name, "used")},
		})
	}
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2020 David Newhall II
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
# UniFi Poller: `webserver` plugin
|
||||
|
||||
Built-In Web Server Go Library for UniFi Poller.
|
||||
|
||||
Enabling the web server is optional. It provides a window into the running data.
|
||||
The web server may be secured with a simple password. SSL is also optional.
|
||||
|
||||
See the [Web Server Wiki](https://github.com/unpoller/unpoller/wiki/Web-Server)
|
||||
for more information about how it works.
|
||||
|
||||
Other plugins must import this library to make use of it. While this library is
|
||||
labeled as a plugin, it's pretty much required since everything imports it.
|
||||
That said, it is still disabled by default, and won't store any data unless it's
|
||||
enabled.
|
||||
|
||||
_This needs a better godoc and examples._
|
||||
|
||||
## Overview
|
||||
|
||||
- Recent logs from poller are visible.
|
||||
- Uptime and Version are displayed across the top.
|
||||
|
||||
### Controllers
|
||||
|
||||
- The web server interface allows you to see the configuration for each controller.
|
||||
- Some metadata about each controller is displayed, such as sites, clients, and devices.
|
||||
- Example config: [up.json.example](https://github.com/unpoller/unpoller/blob/master/examples/up.json.example)
|
||||
|
||||
### Input Plugins
|
||||
|
||||
- You may view input plugin configuration. Currently only UniFi.
|
||||
- The example config above shows input plugin data.
|
||||
|
||||
### Output Plugins
|
||||
|
||||
- You may view output plugin configuration. Currently Prometheus and InfluxDB.
|
||||
- The example config above shows output plugin data.
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
package webserver
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
/* This file has the methods that pass out actual content. */
|
||||
|
||||
// Returns the main index file.
// If index.html becomes a template, this is where it can be compiled.
// The file is served straight from the configured HTML path.
func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(s.HTMLPath, "index.html"))
}
|
||||
|
||||
// Arbitrary /health handler.
// Always answers "OK"; it only proves the server is up and serving.
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
	s.handleDone(w, []byte("OK"), mimeHTML)
}
|
||||
|
||||
// Returns static files from static-files path. /css, /js, /img (/images, /image).
|
||||
func (s *Server) handleStatic(w http.ResponseWriter, r *http.Request) {
|
||||
switch v := mux.Vars(r)["sub"]; v {
|
||||
case "image", "img":
|
||||
dir := http.Dir(filepath.Join(s.HTMLPath, "static", "images"))
|
||||
http.StripPrefix("/"+v, http.FileServer(dir)).ServeHTTP(w, r)
|
||||
default: // images, js, css, etc
|
||||
dir := http.Dir(filepath.Join(s.HTMLPath, "static", v))
|
||||
http.StripPrefix("/"+v, http.FileServer(dir)).ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns poller configs and/or plugins. /api/v1/config.
// With no subpath it dumps plugin lists, poller config, and server uptime;
// "plugins" returns just the plugin name lists; anything else is a 404.
func (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)

	switch vars["sub"] {
	case "":
		data := map[string]interface{}{
			"inputs":  s.Collect.Inputs(),
			"outputs": s.Collect.Outputs(),
			"poller":  s.Collect.Poller(),
			// Whole seconds since the server started.
			"uptime": int(time.Since(s.start).Round(time.Second).Seconds()),
		}
		s.handleJSON(w, data)
	case "plugins":
		data := map[string][]string{
			"inputs":  s.Collect.Inputs(),
			"outputs": s.Collect.Outputs(),
		}
		s.handleJSON(w, data)
	default:
		s.handleMissing(w, r)
	}
}
|
||||
|
||||
// Returns an output plugin's data: /api/v1/output/{output}.
// Unknown plugins 404. With no recognized subpath the plugin's config is
// returned; "eventgroups", "events", and "counters" return those views,
// optionally narrowed by the {value} path segment.
func (s *Server) handleOutput(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)

	c := s.plugins.getOutput(vars["output"])
	if c == nil {
		s.handleMissing(w, r)
		return
	}

	// Read-lock the plugin entry while serializing its data.
	c.RLock()
	defer c.RUnlock()

	switch val := vars["value"]; vars["sub"] {
	default:
		// Any unrecognized (or empty) subpath returns the plugin config.
		s.handleJSON(w, c.Config)
	case "eventgroups":
		s.handleJSON(w, c.Events.Groups(val))
	case "events":
		// Empty val dumps all events; a known group returns just that
		// group; an unknown group is a 404.
		switch events, ok := c.Events[val]; {
		case val == "":
			s.handleJSON(w, c.Events)
		case ok:
			s.handleJSON(w, events)
		default:
			s.handleMissing(w, r)
		}
	case "counters":
		if val == "" {
			s.handleJSON(w, c.Counter)
		} else {
			// Missing counters serialize as zero (map zero value).
			s.handleJSON(w, map[string]int64{val: c.Counter[val]})
		}
	}
}
|
||||
|
||||
// Returns an input plugin's data: /api/v1/input/{input}.
// Like handleOutput, plus input-only views: "sites", "devices", and
// "clients" (the latter two filterable by site ID via {value}).
func (s *Server) handleInput(w http.ResponseWriter, r *http.Request) { //nolint:cyclop
	vars := mux.Vars(r)

	c := s.plugins.getInput(vars["input"])
	if c == nil {
		s.handleMissing(w, r)
		return
	}

	// Read-lock the plugin entry while serializing its data.
	c.RLock()
	defer c.RUnlock()

	switch val := vars["value"]; vars["sub"] {
	default:
		// Any unrecognized (or empty) subpath returns the plugin config.
		s.handleJSON(w, c.Config)
	case "eventgroups":
		s.handleJSON(w, c.Events.Groups(val))
	case "events":
		// Empty val dumps all events; a known group returns just that
		// group; an unknown group is a 404.
		switch events, ok := c.Events[val]; {
		case val == "":
			s.handleJSON(w, c.Events)
		case ok:
			s.handleJSON(w, events)
		default:
			s.handleMissing(w, r)
		}
	case "sites":
		s.handleJSON(w, c.Sites)
	case "devices":
		s.handleJSON(w, c.Devices.Filter(val))
	case "clients":
		s.handleJSON(w, c.Clients.Filter(val))
	case "counters":
		if val != "" {
			// Missing counters serialize as zero (map zero value).
			s.handleJSON(w, map[string]int64{val: c.Counter[val]})
		} else {
			s.handleJSON(w, c.Counter)
		}
	}
}
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
package webserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Logf logs a message.
|
||||
func (s *Server) Logf(msg string, v ...interface{}) {
|
||||
NewOutputEvent(PluginName, PluginName, &Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "info"},
|
||||
})
|
||||
s.Collect.Logf(msg, v...)
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (s *Server) LogErrorf(msg string, v ...interface{}) {
|
||||
NewOutputEvent(PluginName, PluginName, &Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "error"},
|
||||
})
|
||||
s.Collect.LogErrorf(msg, v...)
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (s *Server) LogDebugf(msg string, v ...interface{}) {
|
||||
NewOutputEvent(PluginName, PluginName, &Event{
|
||||
Ts: time.Now(),
|
||||
Msg: fmt.Sprintf(msg, v...),
|
||||
Tags: map[string]string{"type": "debug"},
|
||||
})
|
||||
s.Collect.LogDebugf(msg, v...)
|
||||
}
|
||||
|
|
@ -0,0 +1,236 @@
|
|||
package webserver
|
||||
|
||||
import "sync"
|
||||
|
||||
// webPlugins is the shared registry of input and output plugin data
// shown by the web interface. It embeds the server Config so the
// package-level entry points can check the Enable flag.
type webPlugins struct {
	*Config
	inputs       []*Input
	outputs      []*Output
	sync.RWMutex // Locks both of the above slices.
}

// This is global so other plugins can call its methods.
var plugins = &webPlugins{} // nolint: gochecknoglobals
|
||||
|
||||
// UpdateInput allows an input plugin to create an entry or update an existing entry.
// It is a no-op when the web server is disabled.
func UpdateInput(config *Input) {
	if plugins.Enable {
		plugins.updateInput(config)
	}
}
|
||||
|
||||
// NewInputEvent adds an event for an input plugin.
// It is a no-op when the web server is disabled.
func NewInputEvent(name, id string, event *Event) {
	if plugins.Enable {
		plugins.newInputEvent(name, id, event)
	}
}
|
||||
|
||||
// NewOutputEvent adds an event for an output plugin.
// It is a no-op when the web server is disabled.
func NewOutputEvent(name, id string, event *Event) {
	if plugins.Enable {
		plugins.newOutputEvent(name, id, event)
	}
}
|
||||
|
||||
// UpdateOutput allows an output plugin to create an entry or update an existing entry.
// It is a no-op when the web server is disabled.
func UpdateOutput(config *Output) {
	if plugins.Enable {
		plugins.updateOutput(config)
	}
}
|
||||
|
||||
// UpdateOutputCounter allows an output plugin to update a counter's value.
// With no values, the counter is incremented by one. No-op when disabled.
func UpdateOutputCounter(plugin, label string, values ...int64) {
	if plugins.Enable {
		plugins.updateOutputCounter(plugin, label, values...)
	}
}
|
||||
|
||||
// UpdateInputCounter allows an input plugin to update a counter's value.
// Set any arbitrary counter. These are displayed on the web interface.
// With no values, the counter is incremented by one. No-op when disabled.
func UpdateInputCounter(plugin, label string, values ...int64) {
	if plugins.Enable {
		plugins.updateInputCounter(plugin, label, values...)
	}
}
|
||||
|
||||
func (w *webPlugins) updateOutputCounter(plugin, label string, values ...int64) {
|
||||
if len(values) == 0 {
|
||||
values = []int64{1}
|
||||
}
|
||||
|
||||
output := w.getOutput(plugin)
|
||||
if output == nil {
|
||||
return
|
||||
}
|
||||
|
||||
output.Lock()
|
||||
defer output.Unlock()
|
||||
|
||||
if output.Counter == nil {
|
||||
output.Counter = make(map[string]int64)
|
||||
}
|
||||
|
||||
for _, v := range values {
|
||||
output.Counter[label] += v
|
||||
}
|
||||
}
|
||||
|
||||
func (w *webPlugins) updateInputCounter(plugin, label string, values ...int64) {
|
||||
if len(values) == 0 {
|
||||
values = []int64{1}
|
||||
}
|
||||
|
||||
input := w.getInput(plugin)
|
||||
if input == nil {
|
||||
return
|
||||
}
|
||||
|
||||
input.Lock()
|
||||
defer input.Unlock()
|
||||
|
||||
if input.Counter == nil {
|
||||
input.Counter = make(map[string]int64)
|
||||
}
|
||||
|
||||
for _, v := range values {
|
||||
input.Counter[label] += v
|
||||
}
|
||||
}
|
||||
|
||||
// updateInput registers a new input plugin entry, or merges the non-nil
// fields of config into the existing one. Nil fields are left untouched,
// so callers may send partial updates.
func (w *webPlugins) updateInput(config *Input) {
	if config == nil {
		return
	}

	input := w.getInput(config.Name)
	if input == nil {
		// First time we see this plugin name: store it as-is.
		w.newInput(config)
		return
	}

	input.Lock()
	defer input.Unlock()

	if config.Clients != nil {
		input.Clients = config.Clients
	}

	if config.Sites != nil {
		input.Sites = config.Sites
	}

	if config.Devices != nil {
		input.Devices = config.Devices
	}

	if config.Config != nil {
		input.Config = config.Config
	}

	if config.Counter != nil {
		input.Counter = config.Counter
	}
}
|
||||
|
||||
func (w *webPlugins) updateOutput(config *Output) {
|
||||
if config == nil || config.Config == nil {
|
||||
return
|
||||
}
|
||||
|
||||
output := w.getOutput(config.Name)
|
||||
if output == nil {
|
||||
w.newOutput(config)
|
||||
return
|
||||
}
|
||||
|
||||
output.Lock()
|
||||
defer output.Unlock()
|
||||
|
||||
if config.Config != nil {
|
||||
output.Config = config.Config
|
||||
}
|
||||
|
||||
if config.Counter != nil {
|
||||
output.Counter = config.Counter
|
||||
}
|
||||
}
|
||||
|
||||
// newInputEvent appends an event to the named group of a registered
// input plugin, creating the event map and group on first use.
// Events for unknown plugins are dropped.
func (w *webPlugins) newInputEvent(plugin, id string, event *Event) {
	input := w.getInput(plugin)
	if input == nil {
		return
	}

	input.Lock()
	defer input.Unlock()

	if input.Events == nil {
		input.Events = make(Events)
	}

	if _, ok := input.Events[id]; !ok {
		input.Events[id] = &EventGroup{}
	}

	// MaxEvents caps how many events each group retains.
	input.Events[id].add(event, int(w.Config.MaxEvents))
}
|
||||
|
||||
// newOutputEvent appends an event to the named group of a registered
// output plugin, creating the event map and group on first use.
// Events for unknown plugins are dropped.
func (w *webPlugins) newOutputEvent(plugin, id string, event *Event) {
	output := w.getOutput(plugin)
	if output == nil {
		return
	}

	output.Lock()
	defer output.Unlock()

	if output.Events == nil {
		output.Events = make(Events)
	}

	if _, ok := output.Events[id]; !ok {
		output.Events[id] = &EventGroup{}
	}

	// MaxEvents caps how many events each group retains.
	output.Events[id].add(event, int(w.Config.MaxEvents))
}
|
||||
|
||||
// newInput appends a brand-new input plugin entry under the write lock.
// Callers (updateInput) are responsible for checking the name is unseen.
func (w *webPlugins) newInput(config *Input) {
	w.Lock()
	defer w.Unlock()
	w.inputs = append(w.inputs, config)
}
|
||||
|
||||
// newOutput appends a brand-new output plugin entry under the write lock.
// Callers (updateOutput) are responsible for checking the name is unseen.
func (w *webPlugins) newOutput(config *Output) {
	w.Lock()
	defer w.Unlock()
	w.outputs = append(w.outputs, config)
}
|
||||
|
||||
func (w *webPlugins) getInput(name string) *Input {
|
||||
w.RLock()
|
||||
defer w.RUnlock()
|
||||
|
||||
for i := range w.inputs {
|
||||
if w.inputs[i].Name == name {
|
||||
return w.inputs[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webPlugins) getOutput(name string) *Output {
|
||||
w.RLock()
|
||||
defer w.RUnlock()
|
||||
|
||||
for i := range w.outputs {
|
||||
if w.outputs[i].Name == name {
|
||||
return w.outputs[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,148 @@
|
|||
package webserver
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Input is the data tracked for input plugins.
// An input plugin should fill this data every time it polls this data.
// Partial updates are OK. Set non-updated fields to nil and they're ignored.
type Input struct {
	Name         string           // Plugin name; the registry lookup key.
	Sites        Sites            // Network locations known to this input.
	Events       Events           // Log-style events, grouped by arbitrary ID.
	Devices      Devices          // Network devices seen by this input.
	Clients      Clients          // Network clients seen by this input.
	Config       interface{}      // The plugin's own configuration, served as JSON.
	Counter      map[string]int64 // Arbitrary counters shown on the web interface.
	sync.RWMutex                  // Locks this data structure.
}
|
||||
|
||||
// Output is the data tracked for output plugins.
// Output plugins should fill this data on startup,
// and regularly update counters for things worth counting.
// Setting Config will overwrite previous value.
type Output struct {
	Name         string           // Plugin name; the registry lookup key.
	Events       Events           // Log-style events, grouped by arbitrary ID.
	Config       interface{}      // The plugin's own configuration, served as JSON.
	Counter      map[string]int64 // Arbitrary counters shown on the web interface.
	sync.RWMutex                  // Locks this data structure.
}
|
||||
|
||||
/*
|
||||
These are minimal types to display a small set of data on the web interface.
|
||||
These may be expanded upon, in time, as users express their needs and wants.
|
||||
*/
|
||||
|
||||
// Sites is a list of network locations.
type Sites []*Site

// Site is a network location and its meta data.
type Site struct {
	ID         string `json:"id"`         // Unique site identifier.
	Name       string `json:"name"`       // Short site name.
	Desc       string `json:"desc"`       // Human-readable description.
	Source     string `json:"source"`     // Which input produced this site.
	Controller string `json:"controller"` // Controller the site belongs to.
}
|
||||
|
||||
// Events is all the events a plugin has. string = SiteID + text, or plugin name, or "whatever".
type Events map[string]*EventGroup

// EventGroup allows each plugin to have a map of events. ie. one map per controller.
type EventGroup struct {
	Latest time.Time `json:"latest"` // Timestamp of the newest event stored.
	Events []*Event  `json:"events"` // Events in arrival order, oldest first.
}

// Event is like a log message.
type Event struct {
	Ts   time.Time         `json:"ts"` // nolint: stylecheck
	Msg  string            `json:"msg"`
	Tags map[string]string `json:"tags,omitempty"` // e.g. {"type": "error"}.
}
|
||||
|
||||
func (e Events) Groups(prefix string) (groups []string) {
|
||||
for n := range e {
|
||||
if prefix == "" || strings.HasPrefix(n, prefix) {
|
||||
groups = append(groups, n)
|
||||
}
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
|
||||
// add adds a new event and makes sure the slice is not too big.
|
||||
func (e *EventGroup) add(event *Event, max int) {
|
||||
if !e.Latest.Before(event.Ts) {
|
||||
return // Ignore older events.
|
||||
}
|
||||
|
||||
e.Latest = event.Ts
|
||||
e.Events = append(e.Events, event)
|
||||
|
||||
if i := len(e.Events) - max; i > 0 {
|
||||
e.Events = e.Events[i:]
|
||||
}
|
||||
}
|
||||
|
||||
// Devices is a list of network devices and their data.
type Devices []*Device

// Device holds the data for a network device.
type Device struct {
	Clients    int         `json:"clients"` // Count of attached clients.
	Uptime     int         `json:"uptime"`
	Name       string      `json:"name"`
	SiteID     string      `json:"site_id"` // Site this device belongs to; used by Filter.
	Source     string      `json:"source"`  // Which input produced this device.
	Controller string      `json:"controller"`
	MAC        string      `json:"mac"`
	IP         string      `json:"ip"`
	Type       string      `json:"type"`
	Model      string      `json:"model"`
	Version    string      `json:"version"`
	Config     interface{} `json:"config,omitempty"`
}
|
||||
|
||||
func (c Devices) Filter(siteid string) (devices []*Device) {
|
||||
for _, n := range c {
|
||||
if siteid == "" || n.SiteID == siteid {
|
||||
devices = append(devices, n)
|
||||
}
|
||||
}
|
||||
|
||||
return devices
|
||||
}
|
||||
|
||||
// Clients is a list of clients with their data.
type Clients []*Client

// Client holds the data for a network client.
type Client struct {
	Rx         int64     `json:"rx_bytes"`
	Tx         int64     `json:"tx_bytes"`
	Name       string    `json:"name"`
	SiteID     string    `json:"site_id"` // Site this client belongs to; used by Filter.
	Source     string    `json:"source"`  // Which input produced this client.
	Controller string    `json:"controller"`
	MAC        string    `json:"mac"`
	IP         string    `json:"ip"`
	Type       string    `json:"type"`
	DeviceMAC  string    `json:"device_mac"` // MAC of the device the client is attached to.
	Since      time.Time `json:"since"`
	Last       time.Time `json:"last"`
}
|
||||
|
||||
func (c Clients) Filter(siteid string) (clients []*Client) {
|
||||
for _, n := range c {
|
||||
if siteid == "" || n.SiteID == siteid {
|
||||
clients = append(clients, n)
|
||||
}
|
||||
}
|
||||
|
||||
return clients
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
// Package webserver is a UniFi Poller plugin that exports running data to a web interface.
|
||||
package webserver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
const (
	// PluginName identifies this output plugin.
	PluginName = "WebServer"
	// DefaultPort is the default web http port.
	DefaultPort = 37288
	// DefaultEvents is the default number of events stored per plugin.
	// See EventGroup.add, which trims each group to this cap.
	DefaultEvents = 200
)
|
||||
|
||||
// Config is the webserver library input config.
type Config struct {
	// Enable turns the web server on; it is off by default.
	Enable bool `json:"enable" toml:"enable" xml:"enable,attr" yaml:"enable"`
	// SSLCrtPath and SSLKeyPath must both be set to serve HTTPS;
	// otherwise the server listens on plain HTTP.
	SSLCrtPath string `json:"ssl_cert_path" toml:"ssl_cert_path" xml:"ssl_cert_path" yaml:"ssl_cert_path"`
	SSLKeyPath string `json:"ssl_key_path" toml:"ssl_key_path" xml:"ssl_key_path" yaml:"ssl_key_path"`
	// Port is the TCP listen port; defaults to DefaultPort.
	Port uint `json:"port" toml:"port" xml:"port" yaml:"port"`
	// Accounts maps usernames to password hashes for basic auth.
	Accounts accounts `json:"accounts" toml:"accounts" xml:"accounts" yaml:"accounts"`
	// HTMLPath is where index.html and the static/ assets live.
	HTMLPath string `json:"html_path" toml:"html_path" xml:"html_path" yaml:"html_path"`
	// MaxEvents caps stored events per plugin group; defaults to DefaultEvents.
	MaxEvents uint `json:"max_events" toml:"max_events" xml:"max_events" yaml:"max_events"`
}
|
||||
|
||||
// accounts stores a map of usernames and password hashes.
type accounts map[string]string
|
||||
|
||||
// Server is the main library struct/data.
type Server struct {
	*Config `json:"webserver" toml:"webserver" xml:"webserver" yaml:"webserver"`
	server  *http.Server // The running HTTP(S) server; set by Start.
	plugins *webPlugins  // Shared plugin registry shown by the web UI.
	Collect poller.Collect // Poller core interface; set by Run.
	start   time.Time      // Startup time, used for the uptime API value.
}
|
||||
|
||||
// init is how this modular code is initialized by the main app.
// This module adds itself as an output module to the poller core.
func init() { // nolint: gochecknoinits
	// These defaults apply when the config file leaves the values unset.
	s := &Server{plugins: plugins, start: time.Now(), Config: &Config{
		Port:      DefaultPort,
		HTMLPath:  filepath.Join(poller.DefaultObjPath(), "web"),
		MaxEvents: DefaultEvents,
	}}
	// Share the config with the global plugin registry so the
	// package-level helpers can check the Enable flag.
	plugins.Config = s.Config

	poller.NewOutput(&poller.Output{
		Name:   PluginName,
		Config: s,
		Method: s.Run,
	})
}
|
||||
|
||||
// Run starts the server and gets things going.
|
||||
func (s *Server) Run(c poller.Collect) error {
|
||||
if s.Collect = c; s.Config == nil || s.Port == 0 || s.HTMLPath == "" || !s.Enable {
|
||||
s.Logf("Internal web server disabled!")
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := os.Stat(s.HTMLPath); err != nil {
|
||||
return fmt.Errorf("problem with HTML path: %w", err)
|
||||
}
|
||||
|
||||
UpdateOutput(&Output{Name: PluginName, Config: s.Config})
|
||||
|
||||
return s.Start()
|
||||
}
|
||||
|
||||
// Start gets the web server going.
|
||||
func (s *Server) Start() (err error) {
|
||||
s.server = &http.Server{
|
||||
Addr: "0.0.0.0:" + strconv.Itoa(int(s.Port)),
|
||||
WriteTimeout: time.Minute,
|
||||
ReadTimeout: time.Minute,
|
||||
IdleTimeout: time.Minute,
|
||||
Handler: s.newRouter(), // *mux.Router
|
||||
}
|
||||
|
||||
if s.SSLCrtPath == "" || s.SSLKeyPath == "" {
|
||||
s.Logf("Web server starting without SSL. Listening on HTTP port %d", s.Port)
|
||||
err = s.server.ListenAndServe()
|
||||
} else {
|
||||
s.Logf("Web server starting with SSL. Listening on HTTPS port %d", s.Port)
|
||||
err = s.server.ListenAndServeTLS(s.SSLCrtPath, s.SSLKeyPath)
|
||||
}
|
||||
|
||||
if !errors.Is(err, http.ErrServerClosed) {
|
||||
return fmt.Errorf("web server: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) newRouter() *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
// special routes
|
||||
router.Handle("/debug/vars", http.DefaultServeMux).Methods("GET") // unauthenticated expvar
|
||||
router.HandleFunc("/health", s.handleLog(s.handleHealth)).Methods("GET") // unauthenticated health
|
||||
// main web app/files/js/css
|
||||
router.HandleFunc("/", s.basicAuth(s.handleIndex)).Methods("GET", "POST")
|
||||
router.PathPrefix("/{sub:css|js|img|image|images}/").Handler((s.basicAuth(s.handleStatic))).Methods("GET")
|
||||
// api paths for json dumps
|
||||
router.HandleFunc("/api/v1/config", s.basicAuth(s.handleConfig)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/config/{sub}", s.basicAuth(s.handleConfig)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/config/{sub}/{value}", s.basicAuth(s.handleConfig)).Methods("GET", "POST")
|
||||
router.HandleFunc("/api/v1/input/{input}", s.basicAuth(s.handleInput)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/input/{input}/{sub}", s.basicAuth(s.handleInput)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/input/{input}/{sub}/{value}", s.basicAuth(s.handleInput)).Methods("GET", "POST")
|
||||
router.HandleFunc("/api/v1/output/{output}", s.basicAuth(s.handleOutput)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/output/{output}/{sub}", s.basicAuth(s.handleOutput)).Methods("GET")
|
||||
router.HandleFunc("/api/v1/output/{output}/{sub}/{value}", s.basicAuth(s.handleOutput)).Methods("GET", "POST")
|
||||
router.PathPrefix("/").Handler(s.basicAuth(s.handleMissing)).Methods("GET", "POST", "PUT") // 404 everything.
|
||||
|
||||
return router
|
||||
}
|
||||
|
||||
// PasswordIsCorrect returns true if the provided password matches a user's account.
|
||||
func (a accounts) PasswordIsCorrect(user, pass string, ok bool) bool {
|
||||
if len(a) == 0 {
|
||||
return true // No accounts defined in config; allow anyone.
|
||||
} else if !ok {
|
||||
return false // r.BasicAuth() failed, not a valid user.
|
||||
} else if user, ok = a[user]; !ok { // The user var is now the password hash.
|
||||
return false // The username provided doesn't exist.
|
||||
}
|
||||
|
||||
// If this is returns nil, the provided password matches, so return true.
|
||||
return bcrypt.CompareHashAndPassword([]byte(user), []byte(pass)) == nil
|
||||
}
|
||||
|
|
@ -0,0 +1,135 @@
|
|||
package webserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unpoller/pkg/poller"
|
||||
)
|
||||
|
||||
/* This file has the methods that help the content-methods. Shared helpers. */
|
||||
|
||||
const (
	// xPollerError is a temporary response header: handlers set it on
	// failure and the ResponseWriter wrapper captures it for the log line.
	xPollerError = "X-Poller-Error"
	// mimeJSON and mimeHTML are the Content-Type values used by replies.
	mimeJSON = "application/json"
	mimeHTML = "text/plain; charset=utf-8"
)
|
||||
|
||||
// basicAuth wraps web requests with simple auth (and logging).
|
||||
// Called on nearly every request.
|
||||
func (s *Server) basicAuth(handler http.HandlerFunc) http.HandlerFunc {
|
||||
return s.handleLog(func(w http.ResponseWriter, r *http.Request) {
|
||||
if s.Accounts.PasswordIsCorrect(r.BasicAuth()) {
|
||||
handler(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="Enter Name and Password to Login!"`)
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
})
|
||||
}
|
||||
|
||||
// handleLog writes an Apache-like log line. Called on every request.
|
||||
func (s *Server) handleLog(handler http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Scheme = "https"; r.TLS == nil {
|
||||
r.URL.Scheme = "http" // Set schema early in case another handler uses it.
|
||||
}
|
||||
|
||||
// Use custom ResponseWriter to catch and log response data.
|
||||
response := &ResponseWriter{Writer: w, Start: time.Now()}
|
||||
handler(response, r) // Run provided handler with custom ResponseWriter.
|
||||
|
||||
user, _, _ := r.BasicAuth()
|
||||
if user == "" {
|
||||
user = "-" // Only used for logs.
|
||||
}
|
||||
|
||||
logf := s.Logf // Standard log.
|
||||
if response.Error != "" {
|
||||
logf = s.LogErrorf // Format an error log.
|
||||
response.Error = ` "` + response.Error + `"`
|
||||
}
|
||||
|
||||
remote, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
remote = r.RemoteAddr
|
||||
}
|
||||
|
||||
logf(`%s %s %s [%v] "%s %s://%s%s %s" %d %d "%s" "%s" %v%s`, remote, poller.AppName,
|
||||
user, response.Start.Format("01/02/2006:15:04:05 -07:00"), r.Method, r.URL.Scheme,
|
||||
r.Host, r.RequestURI, r.Proto, response.Code, response.Size, r.Referer(),
|
||||
r.UserAgent(), time.Since(response.Start).Round(time.Microsecond), response.Error)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMissing returns a blank 404.
|
||||
func (s *Server) handleMissing(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", mimeHTML)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte("404 page not found\n"))
|
||||
}
|
||||
|
||||
// handleError is a pass-off function when a request returns an error.
|
||||
func (s *Server) handleError(w http.ResponseWriter, err error) {
|
||||
w.Header().Set("Content-Type", mimeHTML)
|
||||
w.Header().Set(xPollerError, err.Error()) // signal
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = w.Write([]byte(err.Error() + "\n"))
|
||||
}
|
||||
|
||||
// handleDone is a pass-off function to finish a request.
|
||||
func (s *Server) handleDone(w http.ResponseWriter, b []byte, cType string) {
|
||||
w.Header().Set("Content-Type", cType)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write(append(b, []byte("\n")...))
|
||||
}
|
||||
|
||||
// handleJSON sends a json-formatted data reply.
|
||||
func (s *Server) handleJSON(w http.ResponseWriter, data interface{}) {
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
s.handleError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
s.handleDone(w, b, mimeJSON)
|
||||
}
|
||||
|
||||
/* Custom http.ResponseWriter interface method and struct overrides. */
|
||||
|
||||
// ResponseWriter is used to override http.ResponseWriter in our http.FileServer.
// This allows us to catch and log the response code, size and error; maybe others.
type ResponseWriter struct {
	Code  int    // HTTP status code captured by WriteHeader.
	Size  int    // Total bytes written, accumulated by Write.
	Error string // Error message captured from the X-Poller-Error header.
	Start time.Time // Request start time, used for log timestamps/duration.
	Writer http.ResponseWriter // The real ResponseWriter being wrapped.
}

// Header sends a header to a client. Satisfies http.ResponseWriter interface.
func (w *ResponseWriter) Header() http.Header {
	return w.Writer.Header()
}

// Write sends bytes to the client. Satisfies http.ResponseWriter interface.
// This also adds the written byte count to our size total.
// Returns nil on success: the previous version wrapped a nil error with %w,
// making every successful write report a non-nil error (which breaks callers
// such as io.Copy inside http.FileServer that stop on write errors).
func (w *ResponseWriter) Write(b []byte) (int, error) {
	size, err := w.Writer.Write(b)
	w.Size += size

	if err != nil {
		return size, fmt.Errorf("writing response: %w", err)
	}

	return size, nil
}
|
||||
|
||||
// WriteHeader sends an http StatusCode to a client. Satisfies http.ResponseWriter interface.
// This custom override method also saves the status code, and any error message (for logs).
// The X-Poller-Error header is set by handleError before it calls WriteHeader,
// which is why it can be captured (and removed from the wire) here.
func (w *ResponseWriter) WriteHeader(code int) {
	w.Error = w.Header().Get(xPollerError) // Catch and save any response error.
	w.Header().Del(xPollerError) // Delete the temporary signal header.
	w.Code = code // Save the status code.
	w.Writer.WriteHeader(code) // Pass the request through.
}
|
||||
Loading…
Reference in New Issue