Merge pull request #159 from unifi-poller/dn2_multi_controller

v2: Multi Controller Support
David Newhall II 2019-12-28 16:07:57 -08:00 committed by GitHub
commit f5830d903d
53 changed files with 2279 additions and 1504 deletions

1
.gitignore vendored

@ -27,3 +27,4 @@ bitly_token
github_deploy_key
gpg.signing.key
.secret-files.tar
*.so


@ -11,7 +11,7 @@ HBREPO="golift/homebrew-mugs"
MAINT="David Newhall II <david at sleepers dot pro>"
VENDOR="Go Lift <code at golift dot io>"
DESC="Polls a UniFi controller, exports metrics to InfluxDB and Prometheus"
GOLANGCI_LINT_ARGS="--enable-all -D gochecknoglobals -D dupl -D lll -D funlen -D wsl -e G402"
GOLANGCI_LINT_ARGS="--enable-all -D gochecknoglobals -D funlen -e G402 -D gochecknoinits"
# Example must exist at examples/$CONFIG_FILE.example
CONFIG_FILE="up.conf"
LICENSE="MIT"
@ -25,30 +25,20 @@ export BINARY GHUSER HBREPO MAINT VENDOR DESC GOLANGCI_LINT_ARGS CONFIG_FILE LIC
# Fix the repo if it doesn't match the binary name.
# Provide a better URL if one exists.
# Used as go import path in docker and homebrew builds.
IMPORT_PATH="github.com/${GHUSER}/${BINARY}"
# Used for source links and wiki links.
SOURCE_URL="https://${IMPORT_PATH}"
SOURCE_URL="https://github.com/${GHUSER}/${BINARY}"
# Used for documentation links.
URL="${SOURCE_URL}"
# This parameter is passed in as -X to go build. Used to override the Version variable in a package.
# This makes a path like github.com/user/hello-world/helloworld.Version=1.3.3
# Name the Version-containing library the same as the github repo, without dashes.
VERSION_PATH="${IMPORT_PATH}/pkg/poller.Version"
# Dynamic. Recommend not changing.
VVERSION=$(git describe --abbrev=0 --tags $(git rev-list --tags --max-count=1))
VERSION="$(echo $VVERSION | tr -d v | grep -E '^\S+$' || echo development)"
# This produces a 0 in some environments (like Homebrew), but it's only used for packages.
ITERATION=$(git rev-list --count --all || echo 0)
DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
BRANCH="$(git rev-parse --abbrev-ref HEAD || echo unknown)"
COMMIT="$(git rev-parse --short HEAD || echo 0)"
# Used by homebrew downloads.
#SOURCE_PATH=https://codeload.${IMPORT_PATH}/tar.gz/v${VERSION}
# This is a custom download path for homebrew formula.
SOURCE_PATH=https://golift.io/${BINARY}/archive/v${VERSION}.tar.gz
export IMPORT_PATH SOURCE_URL URL VERSION_PATH VVERSION VERSION ITERATION DATE BRANCH COMMIT SOURCE_PATH
export SOURCE_URL URL VVERSION VERSION ITERATION DATE COMMIT SOURCE_PATH
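Not part of this diff: the comment above explains the `-X` link flag used to stamp build metadata into the binary, and with this change the Makefile points those flags at the prometheus common version package instead of a project-local Version variable. A minimal sketch of the consuming side, assuming the exported string variables in github.com/prometheus/common/version:

// version_sketch.go - illustrative only; prints whatever the linker stamped in,
// e.g. go build -ldflags "-X github.com/prometheus/common/version.Version=2.0.0-1"
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// Each variable defaults to "" and is overridden at link time by -X.
	fmt.Println("version: ", version.Version)
	fmt.Println("branch:  ", version.Branch)
	fmt.Println("revision:", version.Revision)
	fmt.Println("date:    ", version.BuildDate)
}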


@ -16,7 +16,7 @@ addons:
- gnupg
- expect
go:
- 1.12.x
- 1.13.x
services:
- docker
install:

139
Gopkg.lock generated

@ -1,139 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
name = "github.com/BurntSushi/toml"
packages = ["."]
pruneopts = "UT"
revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
version = "v0.3.1"
[[projects]]
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
version = "v1.0.1"
[[projects]]
digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
name = "github.com/golang/protobuf"
packages = ["proto"]
pruneopts = "UT"
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
version = "v1.3.2"
[[projects]]
branch = "master"
digest = "1:00e5ad58045d6d2a6c9e65d1809ff2594bc396e911712ae892a93976fdece115"
name = "github.com/influxdata/influxdb1-client"
packages = [
"models",
"pkg/escape",
"v2",
]
pruneopts = "UT"
revision = "8bf82d3c094dc06be9da8e5bf9d3589b6ea032ae"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:eb04f69c8991e52eff33c428bd729e04208bf03235be88e4df0d88497c6861b9"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/internal",
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:982be0b5396e16a663697899ce69cc7b1e71ddcae4153af157578d4dc9bc3f88"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e"
[[projects]]
digest = "1:7dec9ab2db741c280b89b142b08ea142824152c5f40fb1f90c35b6ef7a694456"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
"version",
]
pruneopts = "UT"
revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
version = "v0.7.0"
[[projects]]
digest = "1:ec0ff4bd619a67065e34d6477711ed0117e335f99059a4c508e0fe21cfe7b304"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/fs",
"internal/util",
]
pruneopts = "UT"
revision = "6d489fc7f1d9cd890a250f3ea3431b1744b9623f"
version = "v0.0.8"
[[projects]]
digest = "1:524b71991fc7d9246cc7dc2d9e0886ccb97648091c63e30eef619e6862c955dd"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab"
version = "v1.0.5"
[[projects]]
branch = "master"
digest = "1:68fe4216878f16dd6ef33413365fbbe8d2eb781177c7adab874cfc752ce96a7e"
name = "golang.org/x/sys"
packages = ["windows"]
pruneopts = "UT"
revision = "ac6580df4449443a05718fd7858c1f91ad5f8d20"
[[projects]]
digest = "1:2883cea734f2766f41ff9c9d4aefccccc53e3d44f5c8b08893b9c218cf666722"
name = "golift.io/unifi"
packages = ["."]
pruneopts = "UT"
revision = "a607fe940c6a563c6994f2c945394b19d2183b1c"
version = "v4.1.6"
[[projects]]
digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
version = "v2.2.7"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/BurntSushi/toml",
"github.com/influxdata/influxdb1-client/v2",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/prometheus/common/version",
"github.com/spf13/pflag",
"golift.io/unifi",
"gopkg.in/yaml.v2",
]
solver-name = "gps-cdcl"
solver-version = 1


@ -1,9 +0,0 @@
# dep configuration file
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
[prune]
go-tests = true
unused-packages = true


@ -8,6 +8,7 @@ IGNORED:=$(shell bash -c "source .metadata.sh ; env | sed 's/=/:=/;s/^/export /'
# md2roff turns markdown into man files and html files.
MD2ROFF_BIN=github.com/github/hub/md2roff-bin
# Travis CI passes the version in. Local builds get it from the current git tag.
ifeq ($(VERSION),)
include .metadata.make
@ -42,11 +43,13 @@ $(PACKAGE_SCRIPTS) \
--config-files "/etc/$(BINARY)/$(CONFIG_FILE)"
endef
PLUGINS:=$(patsubst plugins/%/main.go,%,$(wildcard plugins/*/main.go))
VERSION_LDFLAGS:= \
-X $(IMPORT_PATH)/vendor/github.com/prometheus/common/version.Branch=$(BRANCH) \
-X $(IMPORT_PATH)/vendor/github.com/prometheus/common/version.BuildDate=$(DATE) \
-X $(IMPORT_PATH)/vendor/github.com/prometheus/common/version.Revision=$(COMMIT) \
-X $(VERSION_PATH)=$(VERSION)-$(ITERATION)
-X github.com/prometheus/common/version.Branch=$(TRAVIS_BRANCH) \
-X github.com/prometheus/common/version.BuildDate=$(DATE) \
-X github.com/prometheus/common/version.Revision=$(COMMIT) \
-X github.com/prometheus/common/version.Version=$(VERSION)-$(ITERATION)
# Makefile targets follow.
@ -183,12 +186,14 @@ $(BINARY)_$(VERSION)-$(ITERATION)_armhf.deb: package_build_linux_armhf check_fpm
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn debsigs --default-key="$(SIGNING_KEY)" --sign=origin $(BINARY)_$(VERSION)-$(ITERATION)_armhf.deb; expect -exact \"Enter passphrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
# Build an environment that can be packaged for linux.
package_build_linux: readme man linux
package_build_linux: readme man plugins_linux_amd64 linux
# Building package environment for linux.
mkdir -p $@/usr/bin $@/etc/$(BINARY) $@/usr/share/man/man1 $@/usr/share/doc/$(BINARY)
mkdir -p $@/usr/bin $@/etc/$(BINARY) $@/usr/share/man/man1 $@/usr/share/doc/$(BINARY) $@/usr/lib/$(BINARY)
# Copying the binary, config file, unit file, and man page into the env.
cp $(BINARY).amd64.linux $@/usr/bin/$(BINARY)
cp *.1.gz $@/usr/share/man/man1
rm -f $@/usr/lib/$(BINARY)/*.so
cp *amd64.so $@/usr/lib/$(BINARY)/
cp examples/$(CONFIG_FILE).example $@/etc/$(BINARY)/
cp examples/$(CONFIG_FILE).example $@/etc/$(BINARY)/$(CONFIG_FILE)
cp LICENSE *.html examples/*?.?* $@/usr/share/doc/$(BINARY)/
@ -226,7 +231,6 @@ docker:
--build-arg "VENDOR=$(VENDOR)" \
--build-arg "AUTHOR=$(MAINT)" \
--build-arg "BINARY=$(BINARY)" \
--build-arg "IMPORT_PATH=$(IMPORT_PATH)" \
--build-arg "SOURCE_URL=$(SOURCE_URL)" \
--build-arg "CONFIG_FILE=$(CONFIG_FILE)" \
--tag $(BINARY) .
@ -245,7 +249,6 @@ $(BINARY).rb: v$(VERSION).tar.gz.sha256 init/homebrew/$(FORMULA).rb.tmpl
-e "s/{{SHA256}}/$(shell head -c64 $<)/g" \
-e "s/{{Desc}}/$(DESC)/g" \
-e "s%{{URL}}%$(URL)%g" \
-e "s%{{IMPORT_PATH}}%$(IMPORT_PATH)%g" \
-e "s%{{SOURCE_PATH}}%$(SOURCE_PATH)%g" \
-e "s%{{SOURCE_URL}}%$(SOURCE_URL)%g" \
-e "s%{{CONFIG_FILE}}%$(CONFIG_FILE)%g" \
@ -253,6 +256,19 @@ $(BINARY).rb: v$(VERSION).tar.gz.sha256 init/homebrew/$(FORMULA).rb.tmpl
init/homebrew/$(FORMULA).rb.tmpl | tee $(BINARY).rb
# That perl line turns hello-world into HelloWorld, etc.
plugins: $(patsubst %,%.so,$(PLUGINS))
$(patsubst %,%.so,$(PLUGINS)):
go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.so,%,$@)
linux_plugins: plugins_linux_amd64 plugins_linux_i386 plugins_linux_arm64 plugins_linux_armhf
plugins_linux_amd64: $(patsubst %,%.linux_amd64.so,$(PLUGINS))
$(patsubst %,%.linux_amd64.so,$(PLUGINS)):
GOOS=linux GOARCH=amd64 go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.linux_amd64.so,%,$@)
plugins_darwin: $(patsubst %,%.darwin.so,$(PLUGINS))
$(patsubst %,%.darwin.so,$(PLUGINS)):
GOOS=darwin go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.darwin.so,%,$@)
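The plugin targets above compile each plugins/*/main.go with -buildmode=plugin into a shared object. A hedged sketch of what such a plugin package can look like; the import path is hypothetical, and the key point is that plugin.Open runs the plugin's init functions, which is where registration with the poller happens:

// Illustrative plugin main package, built like the targets above:
//   go build -o example.so -buildmode=plugin ./plugins/example
package main

import (
	// Hypothetical output package; its init() registers with the poller core,
	// exactly like the compiled-in outputs do (see pkg/influxunifi below).
	_ "example.com/up-plugin/output"
)

// A main function is required to build package main, but the poller never
// calls it; loading the .so is what triggers the init registration.
func main() {}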
# Extras
# Run code tests and lint.
@ -265,17 +281,18 @@ lint:
# This is safe; recommended even.
dep: vendor
vendor: Gopkg.*
dep ensure --vendor-only
vendor: go.mod go.sum
go mod vendor
# Don't run this unless you're ready to debug untested vendored dependencies.
deps:
dep ensure --update
deps: update vendor
update:
go get -u -d
# Homebrew stuff. macOS only.
# Used for Homebrew only. Other distros can create packages.
install: man readme $(BINARY)
install: man readme $(BINARY) plugins_darwin
@echo - Done Building! -
@echo - Local installation with the Makefile is only supported on macOS.
@echo If you wish to install the application manually on Linux, check out the wiki: $(SOURCE_URL)/wiki/Installation
@ -285,8 +302,9 @@ install: man readme $(BINARY)
@[ "$(PREFIX)" != "" ] || (echo "Unable to continue, PREFIX not set. Use: make install PREFIX=/usr/local ETC=/usr/local/etc" && false)
@[ "$(ETC)" != "" ] || (echo "Unable to continue, ETC not set. Use: make install PREFIX=/usr/local ETC=/usr/local/etc" && false)
# Copying the binary, config file, unit file, and man page into the env.
/usr/bin/install -m 0755 -d $(PREFIX)/bin $(PREFIX)/share/man/man1 $(ETC)/$(BINARY) $(PREFIX)/share/doc/$(BINARY)
/usr/bin/install -m 0755 -d $(PREFIX)/bin $(PREFIX)/share/man/man1 $(ETC)/$(BINARY) $(PREFIX)/share/doc/$(BINARY) $(PREFIX)/lib/$(BINARY)
/usr/bin/install -m 0755 -cp $(BINARY) $(PREFIX)/bin/$(BINARY)
/usr/bin/install -m 0755 -cp *darwin.so $(PREFIX)/lib/$(BINARY)/
/usr/bin/install -m 0644 -cp $(BINARY).1.gz $(PREFIX)/share/man/man1
/usr/bin/install -m 0644 -cp examples/$(CONFIG_FILE).example $(ETC)/$(BINARY)/
[ -f $(ETC)/$(BINARY)/$(CONFIG_FILE) ] || /usr/bin/install -m 0644 -cp examples/$(CONFIG_FILE).example $(ETC)/$(BINARY)/$(CONFIG_FILE)


@ -65,109 +65,16 @@ is provided so the application can be easily adapted to any environment.
`Config File Parameters`
sites default: ["all"]
This list of strings should represent the names of sites on the UniFi
controller that will be polled for data. Pass `all` in the list to
poll all sites. On startup, the application prints out all site names
found in the controller; they're cryptic, but they have the human-name
next to them. The cryptic names go into the config file `sites` list.
The controller's first site is not cryptic and is named `default`.
Configuration file (up.conf) parameters are documented in the wiki.
interval default: 30s
How often to poll the controller for updated client and device data.
The UniFi Controller only updates traffic stats about every 30-60 seconds.
Only works if "mode" (below) is "influx" - other modes do not use interval.
* [https://github.com/davidnewhall/unifi-poller/wiki/Configuration](https://github.com/davidnewhall/unifi-poller/wiki/Configuration)
debug default: false
This turns on time stamps and line numbers in logs, outputs a few extra
lines of information while processing.
`Shell Environment Parameters`
quiet default: false
Setting this to true will turn off per-device and per-interval logs. Only
errors will be logged. Using this with debug=true adds line numbers to
any error logs.
This application can be fully configured using shell environment variables.
Find documentation for this feature on the Docker Wiki page.
mode default: "influx"
* Value: influx
This default mode runs this application as a daemon. It will poll
the controller at the configured interval and report measurements to
InfluxDB. Providing an invalid value will run in this default mode.
* Value: influxlambda
Setting this value will invoke a run-once mode where the application
immediately polls the controller and reports the metrics to InfluxDB.
Then it exits. This mode is useful in an AWS Lambda or a crontab where
the execution timings are controlled. This mode may also be adapted
to run in other collector scripts and apps like telegraf or diamond.
This mode can also be combined with a "test database" in InfluxDB to
give yourself a "test config file" you may run ad-hoc to test changes.
* Value: prometheus
In this mode the application opens an http interface and exports the
measurements at /metrics for collection by prometheus. Enabling this
mode disables InfluxDB usage entirely.
* Value: both
Setting the mode to "both" will cause the InfluxDB poller routine to run
along with the Prometheus exporter. You can run both at the same time.
http_listen default: 0.0.0.0:9130
This option controls the IP and port the http listener uses when the
mode is set to prometheus. This setting has no effect when other modes
are in use. Metrics become available at the /metrics URI.
influx_url default: http://127.0.0.1:8086
This is the URL where the Influx web server is available.
influx_user default: unifi
Username used to authenticate with InfluxDB.
influx_pass default: unifi
Password used to authenticate with InfluxDB.
influx_db default: unifi
Custom database created in InfluxDB to use with this application.
On first setup, log into InfluxDB and create access:
$ influx -host localhost -port 8086
CREATE DATABASE unifi
CREATE USER unifi WITH PASSWORD 'unifi' WITH ALL PRIVILEGES
GRANT ALL ON unifi TO unifi
influx_insecure_ssl default: false
Setting this to true will allow use of InfluxDB with an invalid SSL certificate.
unifi_url default: https://127.0.0.1:8443
This is the URL where the UniFi Controller is available.
unifi_user default: influxdb
Username used to authenticate with UniFi controller. This should be a
special service account created on the controller with read-only access.
unifi_pass no default ENV: UNIFI_PASSWORD
Password used to authenticate with UniFi controller. This can also be
set in an environment variable instead of a configuration file.
save_ids default: false
Setting this parameter to true will enable collection of Intrusion
Detection System data. IDS and IPS are the same data set. This is off
by default because most controllers do not have this enabled. It also
creates a lot of new metrics from controllers with a lot of IDS entries.
IDS data does not contain metrics, so this doesn't work with Prometheus.
save_sites
Setting this parameter to false will disable saving Network Site data.
This data populates the Sites dashboard, and this setting affects influx
and prometheus.
reauthenticate default: false
Setting this parameter to true will make UniFi Poller send a new login
request on every interval. This generates a new cookie. Some controller
or reverse proxy configurations require this. Do not enable it unless
your configuration causes the poller to be logged out after some time.
verify_ssl default: false
If your UniFi controller has a valid SSL certificate, you can enable
this option to validate it. Otherwise, any SSL certificate is valid.
* [https://github.com/davidnewhall/unifi-poller/wiki/Docker](https://github.com/davidnewhall/unifi-poller/wiki/Docker)
GO DURATION
---


@ -1,78 +1,104 @@
# UniFi Poller primary configuration file. TOML FORMAT #
# commented lines are defaults, uncomment to change. #
########################################################
# If the controller has more than one site, specify which sites to poll here.
# Set this to ["default"] to poll only the first site on the controller.
# A setting of ["all"] will poll all sites; this works if you only have 1 site too.
sites = ["all"]
[poller]
# Turns on line numbers, microsecond logging, and a per-device log.
# The default is false, but I personally leave this on at home (four devices).
# This may be noisy if you have a lot of devices. It adds one line per device.
debug = false
# The UniFi Controller only updates traffic stats about every 30 seconds.
# Setting this to something lower may lead to "zeros" in your data.
# If you're getting zeros now, set this to "1m"
interval = "30s"
# Turns off per-interval logs. Only startup and error logs will be emitted.
# Recommend enabling debug with this setting for better error logging.
quiet = false
# Turns on line numbers, microsecond logging, and a per-device log.
# The default is false, but I personally leave this on at home (four devices).
# This may be noisy if you have a lot of devices. It adds one line per device.
debug = false
# Load dynamic plugins. Advanced use; only sample mysql plugin provided by default.
plugins = []
# Turns off per-interval logs. Only startup and error logs will be emitted.
# Recommend enabling debug with this setting for better error logging.
quiet = false
#### OUTPUTS
# Which mode to run this application in. The default mode is "influx". Providing
# an invalid mode will also result in "influx". In this default mode the application
# runs as a daemon and polls the controller at the configured interval.
#
# Other options: "influxlambda", "prometheus", "both"
#
# Mode "influxlambda" makes the application exit after collecting and reporting metrics
# to InfluxDB one time. This mode requires an external process like an AWS Lambda
# or a simple crontab to keep the timings accurate on UniFi Poller run intervals.
#
# Mode "prometheus" opens an HTTP server on port 9130 and exports the metrics at
# /metrics for polling collection by a prometheus server. This disables influxdb.
#
# Mode "both" runs the Prometheus HTTP server and InfluxDB poller interval at
# the same time.
mode = "influx"
# If you don't use an output, you can disable it.
# This controls on which ip and port /metrics is exported when mode is "prometheus".
# This has no effect in other modes. Must contain a colon and port.
http_listen = "0.0.0.0:9130"
[prometheus]
disable = false
# This controls on which ip and port /metrics is exported when mode is "prometheus".
# This has no effect in other modes. Must contain a colon and port.
http_listen = "0.0.0.0:9130"
report_errors = false
# InfluxDB does not require auth by default, so the user/password are probably unimportant.
influx_url = "http://127.0.0.1:8086"
influx_user = "unifi"
influx_pass = "unifi"
# Be sure to create this database.
influx_db = "unifi"
# If your InfluxDB uses an invalid SSL cert, set this to true.
influx_insecure_ssl = false
[influxdb]
disable = false
# InfluxDB does not require auth by default, so the user/password are probably unimportant.
url = "http://127.0.0.1:8086"
user = "unifipoller"
pass = "unifipoller"
# Be sure to create this database.
db = "unifi"
# If your InfluxDB uses a valid SSL cert, set this to true.
verify_ssl = false
# The UniFi Controller only updates traffic stats about every 30 seconds.
# Setting this to something lower may lead to "zeros" in your data.
# If you're getting zeros now, set this to "1m"
interval = "30s"
# Make a read-only user in the UniFi Admin Settings.
unifi_user = "influx"
# You may also set env variable UNIFI_PASSWORD instead of putting this in the config.
unifi_pass = "4BB9345C-2341-48D7-99F5-E01B583FF77F"
unifi_url = "https://127.0.0.1:8443"
#### INPUTS
# Some controllers or reverse proxy configurations do not allow cookies to be
# re-used on every request (every interval). This setting provides a workaround
# that causes the poller to re-auth (login) to the controller on every interval.
# Only enable this if you get login errors after 30 seconds. This will generate
# a few more requests to your controller every interval.
reauthenticate = false
[unifi]
# Setting this to true and providing default credentials allows you to skip
# configuring controllers in this config file. Instead you configure them in
# your prometheus.yml config. Prometheus then sends the controller URL to
unifi-poller when it performs the scrape. This is useful if you have many
controllers, or controllers that change often. Most people can leave this off. See wiki for more.
dynamic = false
# Enable collection of Intrusion Detection System Data (InfluxDB only).
# Only useful if IDS or IPS are enabled on one of the sites.
save_ids = false
# The following section contains the default credentials/configuration for any
# dynamic controller (see above section), or the primary controller if you do not
# provide one and dynamic is disabled. In other words, you can just add your
# controller here and delete the following section. Either works.
[unifi.defaults]
role = "https://127.0.0.1:8443"
url = "https://127.0.0.1:8443"
user = "unifipoller"
pass = "unifipoller"
sites = ["all"]
save_ids = false
save_dpi = false
save_sites = true
verify_ssl = false
# Enable collection of site data. This data powers the Network Sites dashboard.
# It's not valuable to everyone and setting this to false will save resources.
save_sites = true
# You may repeat the following section to poll additional controllers.
[[unifi.controller]]
# Friendly name used in dashboards. Uses URL if left empty; which is fine.
# Avoid changing this later because it will live forever in your database.
# Multiple controllers may share a role. This allows grouping during scrapes.
role = ""
# If your UniFi controller has a valid SSL certificate (like Let's Encrypt),
# you can enable this option to validate it. Otherwise, any SSL certificate is
# valid. If you don't know if you have a valid SSL cert, then you don't have one.
verify_ssl = false
url = "https://127.0.0.1:8443"
# Make a read-only user in the UniFi Admin Settings.
user = "unifipoller"
pass = "4BB9345C-2341-48D7-99F5-E01B583FF77F"
# If the controller has more than one site, specify which sites to poll here.
# Set this to ["default"] to poll only the first site on the controller.
# A setting of ["all"] will poll all sites; this works if you only have 1 site too.
sites = ["all"]
# Enable collection of Intrusion Detection System Data (InfluxDB only).
# Only useful if IDS or IPS are enabled on one of the sites.
save_ids = false
# Enable collection of Deep Packet Inspection data. This data breaks down traffic
# types for each client and site, it powers a dedicated DPI dashboard.
# Enabling this adds roughly 150 data points per client. That's 6000 metrics for
# 40 clients. This adds a little bit of poller run time per interval and causes
# more API requests to your controller(s). Don't let these "cons" sway you:
# it's cool data. Please provide feedback on your experience with this feature.
save_dpi = false
# Enable collection of site data. This data powers the Network Sites dashboard.
# It's not valuable to everyone and setting this to false will save resources.
save_sites = true
# If your UniFi controller has a valid SSL certificate (like Let's Encrypt),
# you can enable this option to validate it. Otherwise, any SSL certificate is
# valid. If you don't know if you have a valid SSL cert, then you don't have one.
verify_ssl = false


@ -1,20 +1,51 @@
{
"sites": ["all"],
"interval": "30s",
"debug": false,
"quiet": false,
"mode": "influx",
"http_listen": "0.0.0.0:9130",
"influx_url": "http://127.0.0.1:8086",
"influx_user": "unifi",
"influx_pass": "unifi",
"influx_db": "unifi",
"influx_insecure_ssl": false,
"unifi_user": "influx",
"unifi_pass": "",
"unifi_url": "https://127.0.0.1:8443",
"save_ids": false,
"save_sites": true,
"reauthenticate": false,
"verify_ssl": false
"poller": {
"debug": false,
"quiet": false,
"plugins": []
},
"prometheus": {
"disable": false,
"http_listen": "0.0.0.0:9130",
"report_errors": false
},
"influxdb": {
"disable": false,
"url": "http://127.0.0.1:8086",
"user": "unifipoller",
"pass": "unifipoller",
"db": "unifi",
"verify_ssl": false,
"interval": "30s"
},
"unifi": {
"dynamic": false,
"defaults": {
"role": "https://127.0.0.1:8443",
"user": "unifipoller",
"pass": "unifipoller",
"url": "https://127.0.0.1:8443",
"sites": ["all"],
"save_ids": false,
"save_dpi": false,
"save_sites": true,
"verify_ssl": false
},
"controllers": [
{
"role": "",
"user": "unifipoller",
"pass": "unifipoller",
"url": "https://127.0.0.1:8443",
"sites": ["all"],
"save_dpi": false,
"save_ids": false,
"save_sites": true,
"verify_ssl": false
}
]
}
}


@ -4,31 +4,49 @@
# UniFi Poller primary configuration file. XML FORMAT #
# provided values are defaults. See up.conf.example! #
#######################################################
<plugin> and <site> are lists of strings and may be repeated.
-->
<unifi-poller>
<poller debug="false" quiet="false">
<!-- plugin></plugin -->
<sites>all</sites>
<interval>60s</interval>
<prometheus disable="false">
<http_listen>0.0.0.0:9130</http_listen>
<report_errors>false</report_errors>
</prometheus>
<debug>false</debug>
<quiet>false</quiet>
<influxdb disable="false">
<interval>30s</interval>
<url>http://127.0.0.1:8086</url>
<user>unifipoller</user>
<pass>unifipoller</pass>
<db>unifi</db>
<verify_ssl>false</verify_ssl>
</influxdb>
<mode>influx</mode>
<http_listen>0.0.0.0:9130</http_listen>
<unifi dynamic="false">
<default role="https://127.0.0.1:8443">
<site>all</site>
<user>unifipoller</user>
<pass>unifipoller</pass>
<url>https://127.0.0.1:8443</url>
<verify_ssl>false</verify_ssl>
<save_ids>false</save_ids>
<save_dpi>false</save_dpi>
<save_sites>true</save_sites>
</default>
<influx_db>unifi</influx_db>
<influx_pass>unifi</influx_pass>
<influx_url>http://127.0.0.1:8086</influx_url>
<influx_user>unifi</influx_user>
<influx_insecure_ssl>false</influx_insecure_ssl>
<!-- Repeat this stanza to poll additional controllers. -->
<controller role="">
<site>all</site>
<user>unifipoller</user>
<pass>unifipoller</pass>
<url>https://127.0.0.1:8443</url>
<verify_ssl>false</verify_ssl>
<save_ids>false</save_ids>
<save_dpi>false</save_dpi>
<save_sites>true</save_sites>
</controller>
<unifi_user>influx</unifi_user>
<unifi_pass></unifi_pass>
<unifi_url>https://127.0.0.1:8443</unifi_url>
<reauthenticate>false</reauthenticate>
<verify_ssl>false</verify_ssl>
<save_ids>false</save_ids>
<save_sites>true</save_sites>
</unifi-poller>
</unifi>
</poller>


@ -3,26 +3,50 @@
# provided values are defaults. See up.conf.example! #
########################################################
---
sites:
- all
interval: "30s"
debug: false
quiet: false
poller:
debug: false
quiet: false
plugins: []
mode: "influx"
http_listen: "0.0.0.0:9130"
prometheus:
disable: false
http_listen: "0.0.0.0:9130"
report_errors: false
influx_url: "http://127.0.0.1:8086"
influx_user: "unifi"
influx_pass: "unifi"
influx_db: "unifi"
influx_insecure_ssl: false
influxdb:
disable: false
interval: "30s"
url: "http://127.0.0.1:8086"
user: "unifipoller"
pass: "unifipoller"
db: "unifi"
verify_ssl: false
unifi_user: "influx"
unifi_pass: ""
unifi_url: "https://127.0.0.1:8443"
reauthenticate: false
verify_ssl: false
save_ids: false
save_sites: true
unifi:
dynamic: false
defaults:
role: "https://127.0.0.1:8443"
user: "unifipoller"
pass: "unifipoller"
url: "https://127.0.0.1:8443"
sites:
- all
verify_ssl: false
save_ids: false
save_dpi: false
save_sites: true
controllers:
# Repeat the following stanza to poll more controllers.
- role: ""
user: "unifipoller"
pass: "unifipoller"
url: "https://127.0.0.1:8443"
sites:
- all
verify_ssl: false
save_ids: false
save_dpi: false
save_sites: true

13
go.mod Normal file

@ -0,0 +1,13 @@
module github.com/davidnewhall/unifi-poller
go 1.13
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/prometheus/client_golang v1.3.0
github.com/prometheus/common v0.7.0
github.com/spf13/pflag v1.0.5
golift.io/cnfg v0.0.5
golift.io/unifi v0.0.400
)

91
go.sum Normal file

@ -0,0 +1,91 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golift.io/cnfg v0.0.5 h1:HnMU8Z9C/igKvir1dqaHx5BPuNGZrp99FCtdJyP2Z4I=
golift.io/cnfg v0.0.5/go.mod h1:ScFDIJg/rJGHbRaed/i7g1lBhywEjB0JiP2uZr3xC3A=
golift.io/unifi v0.0.400 h1:r8FlE+p+zmm8jnQdT367H2aGVMTgxZTrHSwbsHBcayA=
golift.io/unifi v0.0.400/go.mod h1:4BjegFlwA3am3mPlY0qHAnSKli4eexLQV42QKaRx9OY=
golift.io/unifi v4.1.6+incompatible h1:Yhb/+obX2vT9i6PElGislSuQ1WUtOf+l+sRjVxlY6nM=
golift.io/unifi v4.1.6+incompatible/go.mod h1:Zjw57ZAzTzCMw784pE8CdCFgkYSzVZzmJ++WUttbjto=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@ -9,23 +9,17 @@ ARG BUILD_DATE=0
ARG COMMIT=0
ARG VERSION=unknown
ARG BINARY=application-builder
ARG IMPORT_PATH=github.com/golift/application-builder
FROM golang:stretch as builder
ARG ARCH
ARG OS
ARG BINARY
ARG IMPORT_PATH
RUN mkdir -p $GOPATH/pkg/mod $GOPATH/bin $GOPATH/src/${IMPORT_PATH}
RUN apt-get update \
&& apt-get install -y curl \
&& curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
RUN mkdir -p $GOPATH/pkg/mod $GOPATH/bin $GOPATH/src /${BINARY}
COPY . /${BINARY}
WORKDIR /${BINARY}
COPY . $GOPATH/src/${IMPORT_PATH}
WORKDIR $GOPATH/src/${IMPORT_PATH}
RUN dep ensure --vendor-only \
RUN go mod vendor \
&& CGO_ENABLED=0 make ${BINARY}.${ARCH}.${OS}
FROM scratch
@ -36,7 +30,6 @@ ARG COMMIT
ARG VERSION
ARG LICENSE=MIT
ARG BINARY
ARG IMPORT_PATH
ARG SOURCE_URL=http://github.com/golift/application-builder
ARG URL=http://github.com/golift/application-builder
ARG DESC=application-builder
@ -58,8 +51,8 @@ LABEL org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.licenses="${LICENSE}" \
org.opencontainers.image.version="${VERSION}"
COPY --from=builder /go/src/${IMPORT_PATH}/${BINARY}.${ARCH}.${OS} /image
COPY --from=builder /go/src/${IMPORT_PATH}/examples/${CONFIG_FILE}.example /etc/${BINARY}/${CONFIG_FILE}
COPY --from=builder /${BINARY}/${BINARY}.${ARCH}.${OS} /image
COPY --from=builder /${BINARY}/examples/${CONFIG_FILE}.example /etc/${BINARY}/${CONFIG_FILE}
COPY --from=builder /etc/ssl /etc/ssl
VOLUME [ "/etc/${BINARY}" ]


@ -28,7 +28,6 @@ for build in $BUILDS; do
--build-arg "VENDOR=${VENDOR}" \
--build-arg "AUTHOR=${MAINT}" \
--build-arg "BINARY=${BINARY}" \
--build-arg "IMPORT_PATH=${IMPORT_PATH}" \
--build-arg "SOURCE_URL=${SOURCE_URL}" \
--build-arg "CONFIG_FILE=${CONFIG_FILE}" \
--tag "${IMAGE_NAME}_${os}_${name}" \


@ -1,7 +1,6 @@
# Homebrew Formula Template. Built by Makefile: `make formula`
# This is part of Application Builder.
# https://github.com/golift/application-builder
# This file is used when FORMULA is set to 'service'.
class {{Class}} < Formula
desc "{{Desc}}"
homepage "{{URL}}"
@ -13,14 +12,11 @@ class {{Class}} < Formula
depends_on "dep"
def install
ENV["GOPATH"] = buildpath
bin_path = buildpath/"src/{{IMPORT_PATH}}"
# Copy all files from their current location (GOPATH root)
# to $GOPATH/src/{{IMPORT_PATH}}
bin_path = buildpath/"#{name}"
# Copy all files from their current location to buildpath/#{name}
bin_path.install Dir["*",".??*"]
cd bin_path do
system "dep", "ensure", "--vendor-only"
system "make" "vendor"
system "make", "install", "VERSION=#{version}", "ITERATION={{Iter}}", "PREFIX=#{prefix}", "ETC=#{etc}"
# If this fails, the user gets a nice big warning about write permissions on their
# #{var}/log folder. The alternative could be letting the app silently fail


@ -4,6 +4,11 @@ import (
"log"
"github.com/davidnewhall/unifi-poller/pkg/poller"
// Load input plugins!
_ "github.com/davidnewhall/unifi-poller/pkg/inputunifi"
// Load output plugins!
_ "github.com/davidnewhall/unifi-poller/pkg/influxunifi"
_ "github.com/davidnewhall/unifi-poller/pkg/promunifi"
)
// Keep it simple.
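The underscore imports above are side-effect imports: each input or output package registers itself with the poller core from its own init(), so main.go never references them directly. A hedged sketch of what such a registration looks like, modeled on the influxunifi init() further down; the package name and fields here are hypothetical:

// Illustrative sketch only: how an output package hooks itself into the poller
// via a side-effect import. "exampleout" and its fields are hypothetical.
package exampleout

import "github.com/davidnewhall/unifi-poller/pkg/poller"

// Config holds whatever settings this output reads from the config file.
type Config struct {
	Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
}

// Output wraps Config so it nests under its own key in the config file.
type Output struct {
	Config Config `json:"exampleout" toml:"exampleout" xml:"exampleout" yaml:"exampleout"`
}

func init() {
	o := &Output{}
	// Registering here means a blank import ( _ "…/exampleout" ) in main.go,
	// or loading a plugin containing this package, is enough to enable it.
	poller.NewOutput(&poller.Output{
		Name:   "exampleout",
		Config: o,
		Method: o.Run,
	})
}

// Run is called by the poller core with a Collect handle for fetching metrics.
func (o *Output) Run(c poller.Collect) error {
	if o.Config.Disable {
		return nil
	}
	// Poll c.Metrics() on a ticker and ship the data somewhere, as the
	// InfluxDB output in this PR does.
	return nil
}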


@ -10,6 +10,7 @@ func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) {
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"ap_name": s.ApName,
"gw_name": s.GwName,
"sw_name": s.SwName,
@ -66,14 +67,29 @@ func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) {
"wired-tx_bytes": s.WiredTxBytes,
"wired-tx_bytes-r": s.WiredTxBytesR,
"wired-tx_packets": s.WiredTxPackets,
/*
"dpi_app": c.DpiStats.App.Val,
"dpi_cat": c.DpiStats.Cat.Val,
"dpi_rx_bytes": c.DpiStats.RxBytes.Val,
"dpi_rx_packets": c.DpiStats.RxPackets.Val,
"dpi_tx_bytes": c.DpiStats.TxBytes.Val,
"dpi_tx_packets": c.DpiStats.TxPackets.Val,
*/
}
r.send(&metric{Table: "clients", Tags: tags, Fields: fields})
}
func (u *InfluxUnifi) batchClientDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
r.send(&metric{
Table: "clientdpi",
Tags: map[string]string{
"category": unifi.DPICats.Get(dpi.Cat),
"application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
"name": s.Name,
"mac": s.MAC,
"site_name": s.SiteName,
"source": s.SourceName,
},
Fields: map[string]interface{}{
"tx_packets": dpi.TxPackets,
"rx_packets": dpi.RxPackets,
"tx_bytes": dpi.TxBytes,
"rx_bytes": dpi.RxBytes,
}},
)
}
}


@ -8,6 +8,8 @@ import (
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchIDS(r report, i *unifi.IDS) {
tags := map[string]string{
"site_name": i.SiteName,
"source": i.SourceName,
"in_iface": i.InIface,
"event_type": i.EventType,
"proto": i.Proto,
@ -35,5 +37,6 @@ func (u *InfluxUnifi) batchIDS(r report, i *unifi.IDS) {
"srcipASN": i.SrcipASN,
"usgipASN": i.UsgipASN,
}
r.send(&metric{Table: "intrusion_detect", Tags: tags, Fields: fields})
}

295
pkg/influxunifi/influxdb.go Normal file

@ -0,0 +1,295 @@
// Package influxunifi provides the methods to turn UniFi measurements into influx
// data-points with appropriate tags and fields.
package influxunifi
import (
"crypto/tls"
"fmt"
"log"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
influx "github.com/influxdata/influxdb1-client/v2"
"golift.io/cnfg"
)
const (
defaultInterval = 30 * time.Second
minimumInterval = 10 * time.Second
defaultInfluxDB = "unifi"
defaultInfluxUser = "unifipoller"
defaultInfluxURL = "http://127.0.0.1:8086"
)
// Config defines the data needed to store metrics in InfluxDB
type Config struct {
Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
URL string `json:"url,omitempty" toml:"url,omitempty" xml:"url" yaml:"url"`
User string `json:"user,omitempty" toml:"user,omitempty" xml:"user" yaml:"user"`
Pass string `json:"pass,omitempty" toml:"pass,omitempty" xml:"pass" yaml:"pass"`
DB string `json:"db,omitempty" toml:"db,omitempty" xml:"db" yaml:"db"`
}
// InfluxDB allows the data to be nested in the config file.
type InfluxDB struct {
Config Config `json:"influxdb" toml:"influxdb" xml:"influxdb" yaml:"influxdb"`
}
// InfluxUnifi is returned by New() after you provide a Config.
type InfluxUnifi struct {
Collector poller.Collect
influx influx.Client
LastCheck time.Time
*InfluxDB
}
type metric struct {
Table string
Tags map[string]string
Fields map[string]interface{}
}
func init() {
u := &InfluxUnifi{InfluxDB: &InfluxDB{}, LastCheck: time.Now()}
poller.NewOutput(&poller.Output{
Name: "influxdb",
Config: u.InfluxDB,
Method: u.Run,
})
}
// PollController runs forever, polling UniFi and pushing to InfluxDB
// This is started by Run() or RunBoth() after everything checks out.
func (u *InfluxUnifi) PollController() {
interval := u.Config.Interval.Round(time.Second)
ticker := time.NewTicker(interval)
log.Printf("[INFO] Everything checks out! Poller started, InfluxDB interval: %v", interval)
for u.LastCheck = range ticker.C {
metrics, ok, err := u.Collector.Metrics()
if err != nil {
u.Collector.LogErrorf("%v", err)
if !ok {
continue
}
}
report, err := u.ReportMetrics(metrics)
if err != nil {
// XXX: reset and re-auth? not sure..
u.Collector.LogErrorf("%v", err)
continue
}
u.LogInfluxReport(report)
}
}
// Run runs a ticker to poll the unifi server and update influxdb.
func (u *InfluxUnifi) Run(c poller.Collect) error {
var err error
if u.Config.Disable {
return nil
}
u.Collector = c
u.setConfigDefaults()
u.influx, err = influx.NewHTTPClient(influx.HTTPConfig{
Addr: u.Config.URL,
Username: u.Config.User,
Password: u.Config.Pass,
TLSConfig: &tls.Config{InsecureSkipVerify: !u.Config.VerifySSL},
})
if err != nil {
return err
}
u.PollController()
return nil
}
func (u *InfluxUnifi) setConfigDefaults() {
if u.Config.URL == "" {
u.Config.URL = defaultInfluxURL
}
if u.Config.User == "" {
u.Config.User = defaultInfluxUser
}
if u.Config.Pass == "" {
u.Config.Pass = defaultInfluxUser
}
if u.Config.DB == "" {
u.Config.DB = defaultInfluxDB
}
if u.Config.Interval.Duration == 0 {
u.Config.Interval = cnfg.Duration{Duration: defaultInterval}
} else if u.Config.Interval.Duration < minimumInterval {
u.Config.Interval = cnfg.Duration{Duration: minimumInterval}
}
u.Config.Interval = cnfg.Duration{Duration: u.Config.Interval.Duration.Round(time.Second)}
}
// ReportMetrics batches all device and client data into influxdb data points.
// Call this after you've collected all the data you care about.
// Returns an error if influxdb calls fail, otherwise returns a report.
func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics) (*Report, error) {
r := &Report{Metrics: m, ch: make(chan *metric), Start: time.Now()}
defer close(r.ch)
var err error
// Make a new Influx Points Batcher.
r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.Config.DB})
if err != nil {
return nil, fmt.Errorf("influx.NewBatchPoints: %v", err)
}
go u.collect(r, r.ch)
// Batch all the points.
u.loopPoints(r)
r.wg.Wait() // wait for all points to finish batching!
// Send all the points.
if err = u.influx.Write(r.bp); err != nil {
return nil, fmt.Errorf("influxdb.Write(points): %v", err)
}
r.Elapsed = time.Since(r.Start)
return r, nil
}
// collect runs in a go routine and batches all the points.
func (u *InfluxUnifi) collect(r report, ch chan *metric) {
for m := range ch {
pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, r.metrics().TS)
if err != nil {
r.error(err)
} else {
r.batch(m, pt)
}
r.done()
}
}
// loopPoints kicks off 5 or 9 go routines to process metrics and send them
// to the collect routine through the metric channel.
func (u *InfluxUnifi) loopPoints(r report) {
m := r.metrics()
r.add()
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.SitesDPI {
u.batchSiteDPI(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.Sites {
u.batchSite(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.ClientsDPI {
u.batchClientDPI(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.Clients {
u.batchClient(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.IDSList {
u.batchIDS(r, s)
}
}()
u.loopDevicePoints(r)
}
func (u *InfluxUnifi) loopDevicePoints(r report) {
m := r.metrics()
if m.Devices == nil {
return
}
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.UAPs {
u.batchUAP(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.USGs {
u.batchUSG(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.USWs {
u.batchUSW(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.UDMs {
u.batchUDM(r, s)
}
}()
}
// LogInfluxReport writes a log message after exporting to influxdb.
func (u *InfluxUnifi) LogInfluxReport(r *Report) {
idsMsg := fmt.Sprintf("IDS Events: %d, ", len(r.Metrics.IDSList))
u.Collector.Logf("UniFi Metrics Recorded. Sites: %d, Clients: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, %sPoints: %d, Fields: %d, Errs: %d, Elapsed: %v",
len(r.Metrics.Sites), len(r.Metrics.Clients), len(r.Metrics.UAPs),
len(r.Metrics.UDMs)+len(r.Metrics.USGs), len(r.Metrics.USWs), idsMsg, r.Total,
r.Fields, len(r.Errors), r.Elapsed.Round(time.Millisecond))
}
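loopPoints/collect above coordinate several producer goroutines with a single batching consumer through the metric channel, gated by the report's add/done counter. A self-contained sketch of that coordination pattern, with illustrative types rather than the real influxunifi ones:

// fanout_sketch.go - illustrative only; mirrors loopPoints() and collect().
package main

import (
	"fmt"
	"sync"
)

// point is a stand-in for the metric struct above.
type point struct {
	table  string
	fields map[string]interface{}
}

func main() {
	ch := make(chan point)
	var wg sync.WaitGroup
	var batch []point

	// Single consumer: all points funnel through one goroutine, so the batch
	// slice needs no locking (this mirrors collect()).
	go func() {
		for p := range ch {
			batch = append(batch, p)
			wg.Done() // one Done per point batched
		}
	}()

	// Producers: one goroutine per metric family (this mirrors loopPoints()).
	// Each point is counted before its producer starts, so Wait() below
	// cannot return early.
	for _, table := range []string{"sites", "clients", "uap"} {
		wg.Add(1)
		go func(table string) {
			ch <- point{table: table, fields: map[string]interface{}{"num": 1}}
		}(table)
	}

	wg.Wait() // every point has been batched; safe to write the batch now
	close(ch)
	fmt.Println("batched", len(batch), "points")
}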


@ -1,142 +0,0 @@
// Package influx provides the methods to turn UniFi measurements into influx
// data-points with appropriate tags and fields.
package influxunifi
import (
"crypto/tls"
"fmt"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
influx "github.com/influxdata/influxdb1-client/v2"
)
// Config defines the data needed to store metrics in InfluxDB
type Config struct {
Database string
URL string
User string
Pass string
BadSSL bool
}
// InfluxUnifi is returned by New() after you provide a Config.
type InfluxUnifi struct {
cf *Config
influx influx.Client
}
type metric struct {
Table string
Tags map[string]string
Fields map[string]interface{}
}
// New returns an InfluxDB interface.
func New(c *Config) (*InfluxUnifi, error) {
i, err := influx.NewHTTPClient(influx.HTTPConfig{
Addr: c.URL,
Username: c.User,
Password: c.Pass,
TLSConfig: &tls.Config{InsecureSkipVerify: c.BadSSL},
})
return &InfluxUnifi{cf: c, influx: i}, err
}
// ReportMetrics batches all device and client data into influxdb data points.
// Call this after you've collected all the data you care about.
// Returns an error if influxdb calls fail, otherwise returns a report.
func (u *InfluxUnifi) ReportMetrics(m *metrics.Metrics) (*Report, error) {
r := &Report{Metrics: m, ch: make(chan *metric), Start: time.Now()}
defer close(r.ch)
// Make a new Influx Points Batcher.
var err error
r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.cf.Database})
if err != nil {
return nil, fmt.Errorf("influx.NewBatchPoints: %v", err)
}
go u.collect(r, r.ch)
// Batch all the points.
u.loopPoints(r)
r.wg.Wait() // wait for all points to finish batching!
// Send all the points.
if err = u.influx.Write(r.bp); err != nil {
return nil, fmt.Errorf("influxdb.Write(points): %v", err)
}
r.Elapsed = time.Since(r.Start)
return r, nil
}
// collect runs in a go routine and batches all the points.
func (u *InfluxUnifi) collect(r report, ch chan *metric) {
for m := range ch {
pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, r.metrics().TS)
if err != nil {
r.error(err)
} else {
r.batch(m, pt)
}
r.done()
}
}
// loopPoints kicks off 3 or 7 go routines to process metrics and send them
// to the collect routine through the metric channel.
func (u *InfluxUnifi) loopPoints(r report) {
m := r.metrics()
r.add()
go func() {
defer r.done()
for _, s := range m.Sites {
u.batchSite(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.Clients {
u.batchClient(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.IDSList {
u.batchIDS(r, s)
}
}()
if m.Devices == nil {
return
}
r.add()
go func() {
defer r.done()
for _, s := range m.UAPs {
u.batchUAP(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.USGs {
u.batchUSG(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.USWs {
u.batchUSW(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.UDMs {
u.batchUDM(r, s)
}
}()
}


@ -4,13 +4,13 @@ import (
"sync"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
"github.com/davidnewhall/unifi-poller/pkg/poller"
influx "github.com/influxdata/influxdb1-client/v2"
)
// Report is returned to the calling procedure after everything is processed.
type Report struct {
Metrics *metrics.Metrics
Metrics *poller.Metrics
Errors []error
Total int
Fields int
@ -28,10 +28,10 @@ type report interface {
send(m *metric)
error(err error)
batch(m *metric, pt *influx.Point)
metrics() *metrics.Metrics
metrics() *poller.Metrics
}
func (r *Report) metrics() *metrics.Metrics {
func (r *Report) metrics() *poller.Metrics {
return r.Metrics
}


@ -11,6 +11,7 @@ func (u *InfluxUnifi) batchSite(r report, s *unifi.Site) {
tags := map[string]string{
"name": s.Name,
"site_name": s.SiteName,
"source": s.SourceName,
"desc": s.Desc,
"status": h.Status,
"subsystem": h.Subsystem,
@ -51,6 +52,27 @@ func (u *InfluxUnifi) batchSite(r report, s *unifi.Site) {
"remote_user_tx_packets": h.RemoteUserTxPackets.Val,
"num_new_alarms": s.NumNewAlarms.Val,
}
r.send(&metric{Table: "subsystems", Tags: tags, Fields: fields})
}
}
func (u *InfluxUnifi) batchSiteDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
r.send(&metric{
Table: "sitedpi",
Tags: map[string]string{
"category": unifi.DPICats.Get(dpi.Cat),
"application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
"site_name": s.SiteName,
"source": s.SourceName,
},
Fields: map[string]interface{}{
"tx_packets": dpi.TxPackets,
"rx_packets": dpi.RxPackets,
"tx_bytes": dpi.TxBytes,
"rx_bytes": dpi.RxBytes,
}},
)
}
}


@ -10,9 +10,11 @@ func (u *InfluxUnifi) batchUAP(r report, s *unifi.UAP) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
@ -30,6 +32,7 @@ func (u *InfluxUnifi) batchUAP(r report, s *unifi.UAP) {
fields["user-num_sta"] = int(s.UserNumSta.Val)
fields["guest-num_sta"] = int(s.GuestNumSta.Val)
fields["num_sta"] = s.NumSta.Val
r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
u.processRadTable(r, tags, s.RadioTable, s.RadioTableStats)
u.processVAPTable(r, tags, s.VapTable)
@ -39,6 +42,7 @@ func (u *InfluxUnifi) processUAPstats(ap *unifi.Ap) map[string]interface{} {
if ap == nil {
return map[string]interface{}{}
}
// Accumulative Statistics.
return map[string]interface{}{
"stat_user-rx_packets": ap.UserRxPackets.Val,
@ -82,6 +86,7 @@ func (u *InfluxUnifi) processVAPTable(r report, t map[string]string, vt unifi.Va
tags := map[string]string{
"device_name": t["name"],
"site_name": t["site_name"],
"source": t["source"],
"ap_mac": s.ApMac,
"bssid": s.Bssid,
"id": s.ID,
@ -135,6 +140,7 @@ func (u *InfluxUnifi) processVAPTable(r report, t map[string]string, vt unifi.Va
"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
"wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
}
r.send(&metric{Table: "uap_vaps", Tags: tags, Fields: fields})
}
}
@ -144,6 +150,7 @@ func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.Ra
tags := map[string]string{
"device_name": t["name"],
"site_name": t["site_name"],
"source": t["source"],
"channel": p.Channel.Txt,
"radio": p.Radio,
}
@ -155,6 +162,7 @@ func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.Ra
"nss": p.Nss.Val,
"radio_caps": p.RadioCaps.Val,
}
for _, t := range rts {
if t.Name == p.Name {
fields["ast_be_xmit"] = t.AstBeXmit.Val
@ -171,9 +179,11 @@ func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.Ra
fields["tx_power"] = t.TxPower.Val
fields["tx_retries"] = t.TxRetries.Val
fields["user-num_sta"] = t.UserNumSta.Val
break
}
}
r.send(&metric{Table: "uap_radios", Tags: tags, Fields: fields})
}
}


@ -4,14 +4,16 @@ import (
"golift.io/unifi"
)
// Combines concatenates N maps. This will delete things if not used with caution.
// Combine concatenates N maps. This will delete things if not used with caution.
func Combine(in ...map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{})
for i := range in {
for k := range in[i] {
out[k] = in[i][k]
}
}
return out
}
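A short usage sketch (hypothetical keys and values, written as an example test alongside the same package): because the maps are merged in order, a key in a later map silently replaces the earlier value.

package influxunifi

import "fmt"

// ExampleCombine is illustrative only; the keys and values are invented.
func ExampleCombine() {
	base := map[string]interface{}{"rx_bytes": 100, "tx_bytes": 200}
	extra := map[string]interface{}{"tx_bytes": 999, "uptime": 3600}
	out := Combine(base, extra)
	// "tx_bytes" from extra replaces the value from base.
	fmt.Println(out["rx_bytes"], out["tx_bytes"], out["uptime"])
	// Output: 100 999 3600
}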
@ -36,7 +38,9 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"source": s.SourceName,
"mac": s.Mac,
"site_name": s.SiteName,
"name": s.Name,
@ -49,6 +53,7 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
u.batchUSGstat(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
u.batchSysStats(s.SysStats, s.SystemStats),
map[string]interface{}{
"source": s.SourceName,
"ip": s.IP,
"bytes": s.Bytes.Val,
"last_seen": s.LastSeen.Val,
@ -65,6 +70,7 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
"num_mobile": s.NumMobile.Val,
},
)
r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
u.batchNetTable(r, tags, s.NetworkTable)
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
@ -72,6 +78,7 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
tags = map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
@ -90,16 +97,18 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
"uptime": s.Uptime.Val,
"state": s.State.Val,
})
r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
u.batchPortTable(r, tags, s.PortTable)
if s.Stat.Ap == nil {
return
// we're done now. the following code process UDM (non-pro) UAP data.
return // we're done now. the following code processes UDM (non-pro) UAP data.
}
tags = map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
@ -117,6 +126,7 @@ func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
fields["user-num_sta"] = int(s.UserNumSta.Val)
fields["guest-num_sta"] = int(s.GuestNumSta.Val)
fields["num_sta"] = s.NumSta.Val
r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
u.processVAPTable(r, tags, *s.VapTable)

View File

@ -10,9 +10,11 @@ func (u *InfluxUnifi) batchUSG(r report, s *unifi.USG) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
@ -39,44 +41,17 @@ func (u *InfluxUnifi) batchUSG(r report, s *unifi.USG) {
"num_mobile": s.NumMobile.Val,
},
)
r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
u.batchNetTable(r, tags, s.NetworkTable)
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
/*
for _, p := range s.PortTable {
t := map[string]string{
"device_name": tags["name"],
"site_name": tags["site_name"],
"name": p.Name,
"ifname": p.Ifname,
"ip": p.IP,
"mac": p.Mac,
"up": p.Up.Txt,
"speed": p.Speed.Txt,
"full_duplex": p.FullDuplex.Txt,
"enable": p.Enable.Txt,
}
f := map[string]interface{}{
"rx_bytes": p.RxBytes.Val,
"rx_dropped": p.RxDropped.Val,
"rx_errors": p.RxErrors.Val,
"rx_packets": p.RxBytes.Val,
"tx_bytes": p.TxBytes.Val,
"tx_dropped": p.TxDropped.Val,
"tx_errors": p.TxErrors.Val,
"tx_packets": p.TxPackets.Val,
"rx_multicast": p.RxMulticast.Val,
"dns_servers": strings.Join(p.DNS, ","),
}
r.send(&metric{Table: "usg_ports", Tags: t, Fields: f})
}
*/
}
func (u *InfluxUnifi) batchUSGstat(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]interface{} {
if gw == nil {
return map[string]interface{}{}
}
return map[string]interface{}{
"uplink_latency": ul.Latency.Val,
"uplink_speed": ul.Speed.Val,
@ -92,14 +67,17 @@ func (u *InfluxUnifi) batchUSGstat(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul un
"lan-rx_dropped": gw.LanRxDropped.Val,
}
}
func (u *InfluxUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
for _, wan := range wans {
if !wan.Up.Val {
continue
}
tags := map[string]string{
"device_name": tags["name"],
"site_name": tags["site_name"],
"source": tags["source"],
"ip": wan.IP,
"purpose": wan.Name,
"mac": wan.Mac,
@ -129,6 +107,7 @@ func (u *InfluxUnifi) batchUSGwans(r report, tags map[string]string, wans ...uni
"tx_broadcast": wan.TxBroadcast.Val,
"tx_multicast": wan.TxMulticast.Val,
}
r.send(&metric{Table: "usg_wan_ports", Tags: tags, Fields: fields})
}
}
@ -138,6 +117,7 @@ func (u *InfluxUnifi) batchNetTable(r report, tags map[string]string, nt unifi.N
tags := map[string]string{
"device_name": tags["name"],
"site_name": tags["site_name"],
"source": tags["source"],
"up": p.Up.Txt,
"enabled": p.Enabled.Txt,
"ip": p.IP,
@ -154,6 +134,7 @@ func (u *InfluxUnifi) batchNetTable(r report, tags map[string]string, nt unifi.N
"tx_bytes": p.TxBytes.Val,
"tx_packets": p.TxPackets.Val,
}
r.send(&metric{Table: "usg_networks", Tags: tags, Fields: fields})
}
}

View File

@ -14,6 +14,7 @@ func (u *InfluxUnifi) batchUSW(r report, s *unifi.USW) {
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
@ -36,6 +37,7 @@ func (u *InfluxUnifi) batchUSW(r report, s *unifi.USW) {
"state": s.State.Val,
"user-num_sta": s.UserNumSta.Val,
})
r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
u.batchPortTable(r, tags, s.PortTable)
}
@ -44,6 +46,7 @@ func (u *InfluxUnifi) batchUSWstat(sw *unifi.Sw) map[string]interface{} {
if sw == nil {
return map[string]interface{}{}
}
return map[string]interface{}{
"stat_bytes": sw.Bytes.Val,
"stat_rx_bytes": sw.RxBytes.Val,
@ -59,14 +62,17 @@ func (u *InfluxUnifi) batchUSWstat(sw *unifi.Sw) map[string]interface{} {
"stat_tx_retries": sw.TxRetries.Val,
}
}
func (u *InfluxUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
for _, p := range pt {
if !p.Up.Val || !p.Enable.Val {
continue // only record UP ports.
}
tags := map[string]string{
"site_name": t["site_name"],
"device_name": t["name"],
"source": t["source"],
"name": p.Name,
"poe_mode": p.PoeMode,
"port_poe": p.PortPoe.Txt,
@ -96,11 +102,13 @@ func (u *InfluxUnifi) batchPortTable(r report, t map[string]string, pt []unifi.P
"tx_multicast": p.TxMulticast.Val,
"tx_packets": p.TxPackets.Val,
}
if p.PoeEnable.Val && p.PortPoe.Val {
fields["poe_current"] = p.PoeCurrent.Val
fields["poe_power"] = p.PoePower.Val
fields["poe_voltage"] = p.PoeVoltage.Val
}
r.send(&metric{Table: "usw_ports", Tags: tags, Fields: fields})
}
}

195
pkg/inputunifi/collector.go Normal file
View File

@ -0,0 +1,195 @@
package inputunifi
import (
"fmt"
"strings"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"golift.io/unifi"
)
func (u *InputUnifi) isNill(c *Controller) bool {
u.RLock()
defer u.RUnlock()
return c.Unifi == nil
}
// newDynamicCntrlr creates and saves a controller (with auth cookie) for repeated use.
// This is called when an unconfigured controller is requested.
func (u *InputUnifi) newDynamicCntrlr(url string) (bool, *Controller) {
u.Lock()
defer u.Unlock()
c := u.dynamic[url]
if c != nil {
// it already exists.
return false, c
}
ccopy := u.Default // copy defaults into new controller
c = &ccopy
u.dynamic[url] = c
c.Role = url
c.URL = url
return true, c
}
func (u *InputUnifi) dynamicController(url string) (*poller.Metrics, error) {
if !strings.HasPrefix(url, "http") {
return nil, fmt.Errorf("scrape filter match failed, and filter is not http URL")
}
new, c := u.newDynamicCntrlr(url)
if new {
u.Logf("Authenticating to Dynamic UniFi Controller: %s", url)
if err := u.getUnifi(c); err != nil {
return nil, fmt.Errorf("authenticating to %s: %v", url, err)
}
}
return u.collectController(c)
}
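A hedged sketch of how this path gets exercised: with the dynamic option enabled, a caller passes a full controller URL as the filter path instead of a configured role. The URL and helper name below are invented.

package inputunifi

import (
	"fmt"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
)

// pollDynamic is a hypothetical caller; it assumes an initialized InputUnifi
// with Dynamic set to true and usable Default credentials.
func pollDynamic(u *InputUnifi) error {
	m, ok, err := u.MetricsFrom(&poller.Filter{Path: "https://192.168.1.10:8443"})
	if err != nil {
		return fmt.Errorf("dynamic poll failed: %v", err)
	}
	if ok {
		u.Logf("dynamic poll returned %d clients from %d site(s)", len(m.Clients), len(m.Sites))
	}
	return nil
}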
func (u *InputUnifi) collectController(c *Controller) (*poller.Metrics, error) {
if u.isNill(c) {
u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
if err := u.getUnifi(c); err != nil {
return nil, fmt.Errorf("re-authenticating to %s: %v", c.Role, err)
}
}
return u.pollController(c)
}
func (u *InputUnifi) pollController(c *Controller) (*poller.Metrics, error) {
var err error
u.RLock()
defer u.RUnlock()
m := &poller.Metrics{TS: time.Now()} // At this point, it's the Current Check.
// Get the sites we care about.
if m.Sites, err = u.getFilteredSites(c); err != nil {
return m, fmt.Errorf("unifi.GetSites(%v): %v", c.URL, err)
}
if c.SaveDPI {
if m.SitesDPI, err = c.Unifi.GetSiteDPI(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetSiteDPI(%v): %v", c.URL, err)
}
if m.ClientsDPI, err = c.Unifi.GetClientsDPI(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetClientsDPI(%v): %v", c.URL, err)
}
}
if c.SaveIDS {
m.IDSList, err = c.Unifi.GetIDS(m.Sites, time.Now().Add(2*time.Minute), time.Now())
if err != nil {
return m, fmt.Errorf("unifi.GetIDS(%v): %v", c.URL, err)
}
}
// Get all the points.
if m.Clients, err = c.Unifi.GetClients(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetClients(%v): %v", c.URL, err)
}
if m.Devices, err = c.Unifi.GetDevices(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetDevices(%v): %v", c.URL, err)
}
return u.augmentMetrics(c, m), nil
}
// augmentMetrics is our middleware layer between collecting metrics and writing them.
// This is where we can manipulate the returned data or make arbitrary decisions.
// This function currently adds parent device names to client metrics.
func (u *InputUnifi) augmentMetrics(c *Controller, metrics *poller.Metrics) *poller.Metrics {
if metrics == nil || metrics.Devices == nil || metrics.Clients == nil {
return metrics
}
devices := make(map[string]string)
bssdIDs := make(map[string]string)
for _, r := range metrics.UAPs {
devices[r.Mac] = r.Name
for _, v := range r.VapTable {
bssdIDs[v.Bssid] = fmt.Sprintf("%s %s %s:", r.Name, v.Radio, v.RadioName)
}
}
for _, r := range metrics.USGs {
devices[r.Mac] = r.Name
}
for _, r := range metrics.USWs {
devices[r.Mac] = r.Name
}
for _, r := range metrics.UDMs {
devices[r.Mac] = r.Name
}
// These come blank, so set them here.
for i, c := range metrics.Clients {
if devices[c.Mac] = c.Name; c.Name == "" {
devices[c.Mac] = c.Hostname
}
metrics.Clients[i].SwName = devices[c.SwMac]
metrics.Clients[i].ApName = devices[c.ApMac]
metrics.Clients[i].GwName = devices[c.GwMac]
metrics.Clients[i].RadioDescription = bssdIDs[metrics.Clients[i].Bssid] + metrics.Clients[i].RadioProto
}
for i := range metrics.ClientsDPI {
// Name on Client DPI data also comes blank, find it based on MAC address.
metrics.ClientsDPI[i].Name = devices[metrics.ClientsDPI[i].MAC]
if metrics.ClientsDPI[i].Name == "" {
metrics.ClientsDPI[i].Name = metrics.ClientsDPI[i].MAC
}
}
if !*c.SaveSites {
metrics.Sites = nil
}
return metrics
}
// getFilteredSites returns a list of sites to fetch data for.
// Omits requested but unconfigured sites. Grabs the full list from the
// controller and returns the sites provided in the config file.
func (u *InputUnifi) getFilteredSites(c *Controller) (unifi.Sites, error) {
u.RLock()
defer u.RUnlock()
sites, err := c.Unifi.GetSites()
if err != nil {
return nil, err
} else if len(c.Sites) < 1 || StringInSlice("all", c.Sites) {
return sites, nil
}
i := 0
for _, s := range sites {
// Only include valid sites in the request filter.
if StringInSlice(s.Name, c.Sites) {
sites[i] = s
i++
}
}
return sites[:i], nil
}

198
pkg/inputunifi/input.go Normal file
View File

@ -0,0 +1,198 @@
// Package inputunifi implements the poller.Input interface and bridges the gap between
// metrics from the unifi library, and the augments required to pump them into unifi-poller.
package inputunifi
import (
"fmt"
"os"
"strings"
"sync"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"golift.io/unifi"
)
const (
defaultURL = "https://127.0.0.1:8443"
defaultUser = "unifipoller"
defaultPass = "unifipoller"
defaultSite = "all"
)
// InputUnifi contains the running data.
type InputUnifi struct {
*Config `json:"unifi" toml:"unifi" xml:"unifi" yaml:"unifi"`
dynamic map[string]*Controller
sync.Mutex // to lock the map above.
poller.Logger
}
// Controller represents the configuration for a UniFi Controller.
// Each polled controller may have its own configuration.
type Controller struct {
VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
SaveIDS bool `json:"save_ids" toml:"save_ids" xml:"save_ids" yaml:"save_ids"`
SaveDPI bool `json:"save_dpi" toml:"save_dpi" xml:"save_dpi" yaml:"save_dpi"`
SaveSites *bool `json:"save_sites" toml:"save_sites" xml:"save_sites" yaml:"save_sites"`
Role string `json:"role" toml:"role" xml:"role,attr" yaml:"role"`
User string `json:"user" toml:"user" xml:"user" yaml:"user"`
Pass string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
URL string `json:"url" toml:"url" xml:"url" yaml:"url"`
Sites []string `json:"sites,omitempty" toml:"sites,omitempty" xml:"site" yaml:"sites"`
Unifi *unifi.Unifi `json:"-" toml:"-" xml:"-" yaml:"-"`
}
// Config contains our configuration data
type Config struct {
sync.RWMutex // locks the Unifi struct member when re-authing to unifi.
Default Controller `json:"defaults" toml:"defaults" xml:"default" yaml:"defaults"`
Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
Dynamic bool `json:"dynamic" toml:"dynamic" xml:"dynamic,attr" yaml:"dynamic"`
Controllers []*Controller `json:"controllers" toml:"controller" xml:"controller" yaml:"controllers"`
}
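To make the multi-controller shape concrete, a sketch of an equivalent configuration built in code; every role, URL, and credential below is invented, and a real deployment would set these in the config file or environment rather than in Go.

package inputunifi

// newExampleConfig is a hypothetical two-controller setup, illustrative only.
func newExampleConfig() *Config {
	saveSites := true

	return &Config{
		Default: Controller{
			User:      "unifipoller",
			Pass:      "example-password",
			URL:       "https://127.0.0.1:8443",
			Sites:     []string{"all"},
			SaveSites: &saveSites,
		},
		Controllers: []*Controller{
			{Role: "home", URL: "https://192.168.1.1:8443", User: "unifipoller", Pass: "example"},
			{Role: "office", URL: "https://10.9.0.1:8443", User: "unifipoller", Pass: "example", SaveDPI: true},
		},
	}
}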
func init() {
u := &InputUnifi{}
poller.NewInput(&poller.InputPlugin{
Name: "unifi",
Input: u, // this library implements poller.Input interface for Metrics().
Config: u, // Defines our config data interface.
})
}
// getUnifi (re-)authenticates to a unifi controller.
func (u *InputUnifi) getUnifi(c *Controller) error {
var err error
u.Lock()
defer u.Unlock()
if c.Unifi != nil {
c.Unifi.CloseIdleConnections()
}
// Create an authenticated session to the Unifi Controller.
c.Unifi, err = unifi.NewUnifi(&unifi.Config{
User: c.User,
Pass: c.Pass,
URL: c.URL,
VerifySSL: c.VerifySSL,
ErrorLog: u.LogErrorf, // Log all errors.
DebugLog: u.LogDebugf, // Log debug messages.
})
if err != nil {
c.Unifi = nil
return fmt.Errorf("unifi controller: %v", err)
}
u.LogDebugf("Authenticated with controller successfully, %s", c.URL)
return nil
}
// checkSites makes sure the list of provided sites exists on the controller.
// This only runs once during initialization.
func (u *InputUnifi) checkSites(c *Controller) error {
u.RLock()
defer u.RUnlock()
if len(c.Sites) < 1 || c.Sites[0] == "" {
c.Sites = []string{"all"}
}
u.LogDebugf("Checking Controller Sites List")
sites, err := c.Unifi.GetSites()
if err != nil {
return err
}
msg := []string{}
for _, site := range sites {
msg = append(msg, site.Name+" ("+site.Desc+")")
}
u.Logf("Found %d site(s) on controller %s: %v", len(msg), c.Role, strings.Join(msg, ", "))
if StringInSlice("all", c.Sites) {
c.Sites = []string{"all"}
return nil
}
keep := []string{}
FIRST:
for _, s := range c.Sites {
for _, site := range sites {
if s == site.Name {
keep = append(keep, s)
continue FIRST
}
}
u.LogErrorf("Configured site not found on controller %s: %v", c.Role, s)
}
if c.Sites = keep; len(keep) < 1 {
c.Sites = []string{"all"}
}
return nil
}
func (u *InputUnifi) dumpSitesJSON(c *Controller, path, name string, sites unifi.Sites) ([]byte, error) {
allJSON := []byte{}
for _, s := range sites {
apiPath := fmt.Sprintf(path, s.Name)
_, _ = fmt.Fprintf(os.Stderr, "[INFO] Dumping %s: '%s' JSON for site: %s (%s):\n", name, apiPath, s.Desc, s.Name)
body, err := c.Unifi.GetJSON(apiPath)
if err != nil {
return allJSON, err
}
allJSON = append(allJSON, body...)
}
return allJSON, nil
}
func (u *InputUnifi) setDefaults(c *Controller) {
if c.SaveSites == nil {
t := true
c.SaveSites = &t
}
if c.URL == "" {
c.URL = defaultURL
}
if c.Role == "" {
c.Role = c.URL
}
if c.Pass == "" {
c.Pass = defaultPass
}
if c.User == "" {
c.User = defaultUser
}
if len(c.Sites) < 1 {
c.Sites = []string{defaultSite}
}
}
// StringInSlice returns true if a string is in a slice.
func StringInSlice(str string, slice []string) bool {
for _, s := range slice {
if strings.EqualFold(s, str) {
return true
}
}
return false
}

140
pkg/inputunifi/interface.go Normal file
View File

@ -0,0 +1,140 @@
package inputunifi
/* This file contains the poller.Input interface methods. */
import (
"fmt"
"os"
"strings"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"golift.io/unifi"
)
// Initialize gets called one time when starting up.
// Satisfies poller.Input interface.
func (u *InputUnifi) Initialize(l poller.Logger) error {
if u.Disable {
l.Logf("UniFi input plugin disabled!")
return nil
}
if u.setDefaults(&u.Default); len(u.Controllers) < 1 && !u.Dynamic {
new := u.Default // copy defaults.
u.Controllers = []*Controller{&new}
}
if len(u.Controllers) < 1 {
l.Logf("No controllers configured. Polling dynamic controllers only!")
}
u.dynamic = make(map[string]*Controller)
u.Logger = l
for _, c := range u.Controllers {
u.setDefaults(c)
switch err := u.getUnifi(c); err {
case nil:
if err := u.checkSites(c); err != nil {
u.LogErrorf("checking sites on %s: %v", c.Role, err)
}
u.Logf("Configured UniFi Controller at %s v%s as user %s. Sites: %v",
c.URL, c.Unifi.ServerVersion, c.User, c.Sites)
default:
u.LogErrorf("Controller Auth or Connection failed, but continuing to retry! %s: %v", c.Role, err)
}
}
return nil
}
// Metrics grabs all the measurements from a UniFi controller and returns them.
func (u *InputUnifi) Metrics() (*poller.Metrics, bool, error) {
return u.MetricsFrom(nil)
}
// MetricsFrom grabs all the measurements from a UniFi controller and returns them.
func (u *InputUnifi) MetricsFrom(filter *poller.Filter) (*poller.Metrics, bool, error) {
if u.Disable {
return nil, false, nil
}
errs := []string{}
metrics := &poller.Metrics{}
ok := false
if filter != nil && filter.Path != "" {
if !u.Dynamic {
return metrics, false, fmt.Errorf("filter path requested but dynamic lookups disabled")
}
// Attempt a dynamic metrics fetch from an unconfigured controller.
m, err := u.dynamicController(filter.Path)
return m, err == nil && m != nil, err
}
// Check if the request is for an existing, configured controller.
for _, c := range u.Controllers {
if filter != nil && !strings.EqualFold(c.Role, filter.Role) {
continue
}
m, err := u.collectController(c)
if err != nil {
errs = append(errs, err.Error())
}
if m == nil {
continue
}
ok = true
metrics = poller.AppendMetrics(metrics, m)
}
if len(errs) > 0 {
return metrics, ok, fmt.Errorf(strings.Join(errs, ", "))
}
return metrics, ok, nil
}
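A usage sketch for the role-filter branch above, assuming one configured controller was given the role "home"; the helper and role name are invented.

package inputunifi

import (
	"fmt"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
)

// collectByRole is a hypothetical caller that polls a single configured controller.
func collectByRole(u *InputUnifi, role string) (*poller.Metrics, error) {
	m, ok, err := u.MetricsFrom(&poller.Filter{Role: role})
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("no configured controller matched role %q", role)
	}
	return m, nil
}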
// RawMetrics returns API output from the first configured unifi controller.
func (u *InputUnifi) RawMetrics(filter *poller.Filter) ([]byte, error) {
if l := len(u.Controllers); filter.Unit >= l {
return nil, fmt.Errorf("control number %d not found, %d controller(s) configured (0 index)", filter.Unit, l)
}
c := u.Controllers[filter.Unit]
if u.isNill(c) {
u.Logf("Re-authenticating to UniFi Controller: %s", c.URL)
if err := u.getUnifi(c); err != nil {
return nil, fmt.Errorf("re-authenticating to %s: %v", c.Role, err)
}
}
if err := u.checkSites(c); err != nil {
return nil, err
}
sites, err := u.getFilteredSites(c)
if err != nil {
return nil, err
}
switch filter.Kind {
case "d", "device", "devices":
return u.dumpSitesJSON(c, unifi.APIDevicePath, "Devices", sites)
case "client", "clients", "c":
return u.dumpSitesJSON(c, unifi.APIClientPath, "Clients", sites)
case "other", "o":
_, _ = fmt.Fprintf(os.Stderr, "[INFO] Dumping Path '%s':\n", filter.Path)
return c.Unifi.GetJSON(filter.Path)
default:
return []byte{}, fmt.Errorf("must provide filter: devices, clients, other")
}
}

View File

@ -1,16 +0,0 @@
package metrics
import (
"time"
"golift.io/unifi"
)
// Metrics is a type shared by the exporting and reporting packages.
type Metrics struct {
TS time.Time
unifi.Sites
unifi.IDSList
unifi.Clients
*unifi.Devices
}

View File

@ -2,5 +2,8 @@
package poller
// DefaultConfFile is where to find config is --config is not prvided.
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = "/usr/local/etc/unifi-poller/up.conf"
// DefaultObjPath is the path to look for shared object libraries (plugins).
const DefaultObjPath = "/usr/local/lib/unifi-poller"

View File

@ -2,5 +2,8 @@
package poller
// DefaultConfFile is where to find config is --config is not prvided.
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = "/etc/unifi-poller/up.conf"
// DefaultObjPath is the path to look for shared object libraries (plugins).
const DefaultObjPath = "/usr/lib/unifi-poller"

View File

@ -2,5 +2,8 @@
package poller
// DefaultConfFile is where to find config is --config is not prvided.
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = `C:\ProgramData\unifi-poller\up.conf`
// DefaultObjPath is useless in this context. Bummer.
const DefaultObjPath = "PLUGINS_DO_NOT_WORK_ON_WINDOWS_SOWWWWWY"

View File

@ -9,160 +9,141 @@ package poller
*/
import (
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"strconv"
"plugin"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/davidnewhall/unifi-poller/pkg/influxunifi"
"github.com/spf13/pflag"
"golift.io/cnfg"
"golift.io/cnfg/cnfgfile"
"golift.io/unifi"
yaml "gopkg.in/yaml.v2"
)
// Version is injected by the Makefile
var Version = "development"
const (
// App defaults in case they're missing from the config.
appName = "unifi-poller"
defaultInterval = 30 * time.Second
defaultInfluxDB = "unifi"
defaultInfluxUser = "unifi"
defaultInfluxPass = "unifi"
defaultInfluxURL = "http://127.0.0.1:8086"
defaultUnifiUser = "influx"
defaultUnifiURL = "https://127.0.0.1:8443"
defaultHTTPListen = "0.0.0.0:9130"
// AppName is the name of the application.
AppName = "unifi-poller"
// ENVConfigPrefix is the prefix appended to an env variable tag name.
ENVConfigPrefix = "UP"
)
// ENVConfigPrefix is the prefix appended to an env variable tag
// name before retrieving the value from the OS.
const ENVConfigPrefix = "UP_"
// UnifiPoller contains the application startup data, and auth info for UniFi & Influx.
type UnifiPoller struct {
Influx *influxunifi.InfluxUnifi
Unifi *unifi.Unifi
Flag *Flag
Config *Config
LastCheck time.Time
sync.Mutex // locks the Unifi struct member when re-authing to unifi.
Flags *Flags
*Config
}
// Flag represents the CLI args available and their settings.
type Flag struct {
// Flags represents the CLI args available and their settings.
type Flags struct {
ConfigFile string
DumpJSON string
ShowVer bool
*pflag.FlagSet
}
// Config represents the data needed to poll a controller and report to influxdb.
// This is all of the data stored in the config file.
// Any with explicit defaults have omitempty on json and toml tags.
// Metrics is a type shared by the exporting and reporting packages.
type Metrics struct {
TS time.Time
unifi.Sites
unifi.IDSList
unifi.Clients
*unifi.Devices
SitesDPI []*unifi.DPITable
ClientsDPI []*unifi.DPITable
}
// Config represents the core library input data.
type Config struct {
Interval Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
Debug bool `json:"debug" toml:"debug" xml:"debug" yaml:"debug"`
Quiet bool `json:"quiet,omitempty" toml:"quiet,omitempty" xml:"quiet" yaml:"quiet"`
VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
SaveIDS bool `json:"save_ids" toml:"save_ids" xml:"save_ids" yaml:"save_ids"`
ReAuth bool `json:"reauthenticate" toml:"reauthenticate" xml:"reauthenticate" yaml:"reauthenticate"`
InfxBadSSL bool `json:"influx_insecure_ssl" toml:"influx_insecure_ssl" xml:"influx_insecure_ssl" yaml:"influx_insecure_ssl"`
SaveSites bool `json:"save_sites,omitempty" toml:"save_sites,omitempty" xml:"save_sites" yaml:"save_sites"`
Mode string `json:"mode" toml:"mode" xml:"mode" yaml:"mode"`
HTTPListen string `json:"http_listen" toml:"http_listen" xml:"http_listen" yaml:"http_listen"`
Namespace string `json:"namespace" toml:"namespace" xml:"namespace" yaml:"namespace"`
InfluxURL string `json:"influx_url,omitempty" toml:"influx_url,omitempty" xml:"influx_url" yaml:"influx_url"`
InfluxUser string `json:"influx_user,omitempty" toml:"influx_user,omitempty" xml:"influx_user" yaml:"influx_user"`
InfluxPass string `json:"influx_pass,omitempty" toml:"influx_pass,omitempty" xml:"influx_pass" yaml:"influx_pass"`
InfluxDB string `json:"influx_db,omitempty" toml:"influx_db,omitempty" xml:"influx_db" yaml:"influx_db"`
UnifiUser string `json:"unifi_user,omitempty" toml:"unifi_user,omitempty" xml:"unifi_user" yaml:"unifi_user"`
UnifiPass string `json:"unifi_pass,omitempty" toml:"unifi_pass,omitempty" xml:"unifi_pass" yaml:"unifi_pass"`
UnifiBase string `json:"unifi_url,omitempty" toml:"unifi_url,omitempty" xml:"unifi_url" yaml:"unifi_url"`
Sites []string `json:"sites,omitempty" toml:"sites,omitempty" xml:"sites" yaml:"sites"`
*Poller `json:"poller" toml:"poller" xml:"poller" yaml:"poller"`
}
// Duration is used to UnmarshalTOML into a time.Duration value.
type Duration struct{ time.Duration }
// UnmarshalText parses a duration type from a config file.
func (d *Duration) UnmarshalText(data []byte) (err error) {
d.Duration, err = time.ParseDuration(string(data))
return
// Poller is the global config values.
type Poller struct {
Plugins []string `json:"plugins" toml:"plugins" xml:"plugin" yaml:"plugins"`
Debug bool `json:"debug" toml:"debug" xml:"debug,attr" yaml:"debug"`
Quiet bool `json:"quiet,omitempty" toml:"quiet,omitempty" xml:"quiet,attr" yaml:"quiet"`
}
// ParseFile parses and returns our configuration data.
func (c *Config) ParseFile(configFile string) error {
switch buf, err := ioutil.ReadFile(configFile); {
case err != nil:
return err
case strings.Contains(configFile, ".json"):
return json.Unmarshal(buf, c)
case strings.Contains(configFile, ".xml"):
return xml.Unmarshal(buf, c)
case strings.Contains(configFile, ".yaml"):
return yaml.Unmarshal(buf, c)
default:
return toml.Unmarshal(buf, c)
}
}
// LoadPlugins reads-in dynamic shared libraries.
// Not used very often, if at all.
func (u *UnifiPoller) LoadPlugins() error {
for _, p := range u.Plugins {
name := strings.TrimSuffix(p, ".so") + ".so"
// ParseENV copies environment variables into configuration values.
// This is useful for Docker users that find it easier to pass ENV variables
// than a specific configuration file. Uses reflection to find struct tags.
// This method uses the json struct tag member to match environment variables.
// Use a custom tag name by changing "json" below, but that's overkill for this app.
func (c *Config) ParseENV() error {
t := reflect.TypeOf(*c) // Get "types" from the Config struct.
for i := 0; i < t.NumField(); i++ { // Loop each Config struct member
tag := t.Field(i).Tag.Get("json") // Get the ENV variable name from "json" struct tag
tag = strings.Split(strings.ToUpper(tag), ",")[0] // Capitalize and remove ,omitempty suffix
env := os.Getenv(ENVConfigPrefix + tag) // Then pull value from OS.
if tag == "" || env == "" { // Skip if either are empty.
continue
if name == ".so" {
continue // Just ignore it. ugh.
}
// Reflect and update the u.Config struct member at position i.
switch field := reflect.ValueOf(c).Elem().Field(i); field.Type().String() {
// Handle each member type appropriately (differently).
case "string":
// This is a reflect package method to update a struct member by index.
field.SetString(env)
case "int":
val, err := strconv.Atoi(env)
if err != nil {
return fmt.Errorf("%s: %v", tag, err)
}
field.Set(reflect.ValueOf(val))
case "[]string":
field.Set(reflect.ValueOf(strings.Split(env, ",")))
case path.Base(t.PkgPath()) + ".Duration":
val, err := time.ParseDuration(env)
if err != nil {
return fmt.Errorf("%s: %v", tag, err)
}
field.Set(reflect.ValueOf(Duration{val}))
case "bool":
val, err := strconv.ParseBool(env)
if err != nil {
return fmt.Errorf("%s: %v", tag, err)
}
field.SetBool(val)
if _, err := os.Stat(name); os.IsNotExist(err) {
name = path.Join(DefaultObjPath, name)
}
u.Logf("Loading Dynamic Plugin: %s", name)
if _, err := plugin.Open(name); err != nil {
return err
}
}
return nil
}
// ParseConfigs parses the poller config and the config for each registered output plugin.
func (u *UnifiPoller) ParseConfigs() error {
// Parse core config.
if err := u.parseInterface(u.Config); err != nil {
return err
}
// Load dynamic plugins.
if err := u.LoadPlugins(); err != nil {
return err
}
if err := u.parseInputs(); err != nil {
return err
}
return u.parseOutputs()
}
// parseInterface parses the config file and environment variables into the provided interface.
func (u *UnifiPoller) parseInterface(i interface{}) error {
// Parse config file into provided interface.
if err := cnfgfile.Unmarshal(i, u.Flags.ConfigFile); err != nil {
return err
}
// Parse environment variables into provided interface.
_, err := cnfg.UnmarshalENV(i, ENVConfigPrefix)
return err
}
// Parse input plugin configs.
func (u *UnifiPoller) parseInputs() error {
inputSync.Lock()
defer inputSync.Unlock()
for _, i := range inputs {
if err := u.parseInterface(i.Config); err != nil {
return err
}
}
return nil
}
// Parse output plugin configs.
func (u *UnifiPoller) parseOutputs() error {
outputSync.Lock()
defer outputSync.Unlock()
for _, o := range outputs {
if err := u.parseInterface(o.Config); err != nil {
return err
}
// Add more types here if more types are added to the config struct.
}
return nil

View File

@ -2,66 +2,32 @@ package poller
import (
"fmt"
"os"
"strconv"
"strings"
"golift.io/unifi"
)
// DumpJSONPayload prints raw json from the UniFi Controller.
// DumpJSONPayload prints raw json from the UniFi Controller. This is currently
// tied into the -j CLI arg, and is probably not very useful outside that context.
func (u *UnifiPoller) DumpJSONPayload() (err error) {
u.Config.Quiet = true
u.Unifi, err = unifi.NewUnifi(&unifi.Config{
User: u.Config.UnifiUser,
Pass: u.Config.UnifiPass,
URL: u.Config.UnifiBase,
VerifySSL: u.Config.VerifySSL,
})
split := strings.SplitN(u.Flags.DumpJSON, " ", 2)
filter := &Filter{Kind: split[0]}
if split2 := strings.Split(filter.Kind, ":"); len(split2) > 1 {
filter.Kind = split2[0]
filter.Unit, _ = strconv.Atoi(split2[1])
}
if len(split) > 1 {
filter.Path = split[1]
}
m, err := inputs[0].RawMetrics(filter)
if err != nil {
return err
}
fmt.Fprintf(os.Stderr, "[INFO] Authenticated to UniFi Controller @ %v as user %v",
u.Config.UnifiBase, u.Config.UnifiUser)
if err := u.CheckSites(); err != nil {
return err
}
fmt.Println(string(m))
u.Unifi.ErrorLog = func(m string, v ...interface{}) {
fmt.Fprintf(os.Stderr, "[ERROR] "+m, v...)
} // Log all errors to stderr.
switch sites, err := u.GetFilteredSites(); {
case err != nil:
return err
case StringInSlice(u.Flag.DumpJSON, []string{"d", "device", "devices"}):
return u.dumpSitesJSON(unifi.APIDevicePath, "Devices", sites)
case StringInSlice(u.Flag.DumpJSON, []string{"client", "clients", "c"}):
return u.dumpSitesJSON(unifi.APIClientPath, "Clients", sites)
case strings.HasPrefix(u.Flag.DumpJSON, "other "):
apiPath := strings.SplitN(u.Flag.DumpJSON, " ", 2)[1]
_, _ = fmt.Fprintf(os.Stderr, "[INFO] Dumping Path '%s':\n", apiPath)
return u.PrintRawAPIJSON(apiPath)
default:
return fmt.Errorf("must provide filter: devices, clients, other")
}
}
func (u *UnifiPoller) dumpSitesJSON(path, name string, sites unifi.Sites) error {
for _, s := range sites {
apiPath := fmt.Sprintf(path, s.Name)
_, _ = fmt.Fprintf(os.Stderr, "[INFO] Dumping %s: '%s' JSON for site: %s (%s):\n",
name, apiPath, s.Desc, s.Name)
if err := u.PrintRawAPIJSON(apiPath); err != nil {
return err
}
}
return nil
}
// PrintRawAPIJSON prints the raw json for a user-provided path on a UniFi Controller.
func (u *UnifiPoller) PrintRawAPIJSON(apiPath string) error {
body, err := u.Unifi.GetJSON(apiPath)
fmt.Println(string(body))
return err
}

View File

@ -1,71 +0,0 @@
package poller
import (
"fmt"
"time"
"github.com/davidnewhall/unifi-poller/pkg/influxunifi"
)
// GetInfluxDB returns an InfluxDB interface.
func (u *UnifiPoller) GetInfluxDB() (err error) {
if u.Influx != nil {
return nil
}
u.Influx, err = influxunifi.New(&influxunifi.Config{
Database: u.Config.InfluxDB,
User: u.Config.InfluxUser,
Pass: u.Config.InfluxPass,
BadSSL: u.Config.InfxBadSSL,
URL: u.Config.InfluxURL,
})
if err != nil {
return fmt.Errorf("influxdb: %v", err)
}
u.Logf("Logging Measurements to InfluxDB at %s as user %s", u.Config.InfluxURL, u.Config.InfluxUser)
return nil
}
// CollectAndProcess collects measurements and then reports them to InfluxDB
// Can be called once or in a ticker loop. This function and all the ones below
// handle their own logging. An error is returned so the calling function may
// determine if there was a read or write error and act on it. This is currently
// called in two places in this library. One returns an error, one does not.
func (u *UnifiPoller) CollectAndProcess() error {
if err := u.GetInfluxDB(); err != nil {
return err
}
metrics, err := u.CollectMetrics()
if err != nil {
return err
}
u.AugmentMetrics(metrics)
report, err := u.Influx.ReportMetrics(metrics)
if err != nil {
return err
}
u.LogInfluxReport(report)
return nil
}
// LogInfluxReport writes a log message after exporting to influxdb.
func (u *UnifiPoller) LogInfluxReport(r *influxunifi.Report) {
idsMsg := ""
if u.Config.SaveIDS {
idsMsg = fmt.Sprintf("IDS Events: %d, ", len(r.Metrics.IDSList))
}
u.Logf("UniFi Metrics Recorded. Sites: %d, Clients: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, %sPoints: %d, Fields: %d, Errs: %d, Elapsed: %v",
len(r.Metrics.Sites), len(r.Metrics.Clients), len(r.Metrics.UAPs),
len(r.Metrics.UDMs)+len(r.Metrics.USGs), len(r.Metrics.USWs), idsMsg, r.Total,
r.Fields, len(r.Errors), r.Elapsed.Round(time.Millisecond))
}

165
pkg/poller/inputs.go Normal file
View File

@ -0,0 +1,165 @@
package poller
import (
"fmt"
"strings"
"sync"
"golift.io/unifi"
)
var (
inputs []*InputPlugin
inputSync sync.Mutex
)
// Input plugins must implement this interface.
type Input interface {
Initialize(Logger) error // Called once on startup to initialize the plugin.
Metrics() (*Metrics, bool, error) // Called every time new metrics are requested.
MetricsFrom(*Filter) (*Metrics, bool, error) // Called every time new metrics are requested.
RawMetrics(*Filter) ([]byte, error)
}
// InputPlugin describes an input plugin's consumable interface.
type InputPlugin struct {
Name string
Config interface{} // Each config is passed into an unmarshaller later.
Input
}
// Filter is used for metrics filters. Many fields for lots of expansion.
type Filter struct {
Type string
Term string
Name string
Tags string
Role string
Kind string
Path string
Area int
Item int
Unit int
Sign int64
Mass int64
Rate float64
Cost float64
Free bool
True bool
Done bool
Stop bool
}
// NewInput creates a metric input. This should be called by input plugins
// init() functions.
func NewInput(i *InputPlugin) {
inputSync.Lock()
defer inputSync.Unlock()
if i == nil || i.Input == nil {
panic("nil output or method passed to poller.NewOutput")
}
inputs = append(inputs, i)
}
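To show the registration flow end to end, here is a hedged sketch of a from-scratch input plugin; the package, type, and config-section names are all invented, and only the poller types shown in this file are assumed.

package exampleinput

import (
	"time"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
)

// Config is this hypothetical plugin's config-file section.
type Config struct {
	Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
}

// ExampleInput produces empty (but valid) metric batches.
type ExampleInput struct {
	*Config
	poller.Logger
}

func init() {
	e := &ExampleInput{Config: &Config{}}
	poller.NewInput(&poller.InputPlugin{
		Name:   "example",
		Input:  e,        // satisfies poller.Input.
		Config: e.Config, // filled in later by the config unmarshaller.
	})
}

// Initialize stores the logger. Called once at startup.
func (e *ExampleInput) Initialize(l poller.Logger) error {
	e.Logger = l
	e.Logf("example input initialized")
	return nil
}

// Metrics returns an empty batch so aggregation keeps working.
func (e *ExampleInput) Metrics() (*poller.Metrics, bool, error) {
	if e.Disable {
		return nil, false, nil
	}
	return &poller.Metrics{TS: time.Now()}, true, nil
}

// MetricsFrom ignores the filter in this sketch.
func (e *ExampleInput) MetricsFrom(_ *poller.Filter) (*poller.Metrics, bool, error) {
	return e.Metrics()
}

// RawMetrics is not supported by this sketch.
func (e *ExampleInput) RawMetrics(_ *poller.Filter) ([]byte, error) {
	return nil, nil
}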
// InitializeInputs runs the passed-in initializer method for each input plugin.
func (u *UnifiPoller) InitializeInputs() error {
inputSync.Lock()
defer inputSync.Unlock()
for _, input := range inputs {
// This must return, or the app locks up here.
if err := input.Initialize(u); err != nil {
return err
}
}
return nil
}
// Metrics aggregates all the measurements from all configured inputs and returns them.
func (u *UnifiPoller) Metrics() (*Metrics, bool, error) {
errs := []string{}
metrics := &Metrics{}
ok := false
for _, input := range inputs {
m, _, err := input.Metrics()
if err != nil {
errs = append(errs, err.Error())
}
if m == nil {
continue
}
ok = true
metrics = AppendMetrics(metrics, m)
}
var err error
if len(errs) > 0 {
err = fmt.Errorf(strings.Join(errs, ", "))
}
return metrics, ok, err
}
// MetricsFrom aggregates all the measurements from filtered inputs and returns them.
func (u *UnifiPoller) MetricsFrom(filter *Filter) (*Metrics, bool, error) {
errs := []string{}
metrics := &Metrics{}
ok := false
for _, input := range inputs {
if !strings.EqualFold(input.Name, filter.Name) {
continue
}
m, _, err := input.MetricsFrom(filter)
if err != nil {
errs = append(errs, err.Error())
}
if m == nil {
continue
}
ok = true
metrics = AppendMetrics(metrics, m)
}
var err error
if len(errs) > 0 {
err = fmt.Errorf(strings.Join(errs, ", "))
}
return metrics, ok, err
}
// AppendMetrics combines the metrics from two sources.
func AppendMetrics(existing *Metrics, m *Metrics) *Metrics {
existing.SitesDPI = append(existing.SitesDPI, m.SitesDPI...)
existing.Sites = append(existing.Sites, m.Sites...)
existing.ClientsDPI = append(existing.ClientsDPI, m.ClientsDPI...)
existing.Clients = append(existing.Clients, m.Clients...)
existing.IDSList = append(existing.IDSList, m.IDSList...)
if m.Devices == nil {
return existing
}
if existing.Devices == nil {
existing.Devices = &unifi.Devices{}
}
existing.UAPs = append(existing.UAPs, m.UAPs...)
existing.USGs = append(existing.USGs, m.USGs...)
existing.USWs = append(existing.USWs, m.USWs...)
existing.UDMs = append(existing.UDMs, m.UDMs...)
return existing
}

View File

@ -3,31 +3,27 @@ package poller
import (
"fmt"
"log"
"strings"
)
const callDepth = 2
// StringInSlice returns true if a string is in a slice.
func StringInSlice(str string, slice []string) bool {
for _, s := range slice {
if strings.EqualFold(s, str) {
return true
}
}
return false
// Logger is passed into input packages so they may write logs.
type Logger interface {
Logf(m string, v ...interface{})
LogErrorf(m string, v ...interface{})
LogDebugf(m string, v ...interface{})
}
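For anyone driving an input package outside the poller CLI, a throwaway implementation of this interface (a sketch, not part of this commit) could be as small as:

package example

import "log"

// stdLogger satisfies poller.Logger using the standard library logger.
type stdLogger struct{ debug bool }

func (l stdLogger) Logf(m string, v ...interface{})      { log.Printf("[INFO] "+m, v...) }
func (l stdLogger) LogErrorf(m string, v ...interface{}) { log.Printf("[ERROR] "+m, v...) }
func (l stdLogger) LogDebugf(m string, v ...interface{}) {
	if l.debug {
		log.Printf("[DEBUG] "+m, v...)
	}
}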
// Logf prints a log entry if quiet is false.
func (u *UnifiPoller) Logf(m string, v ...interface{}) {
if !u.Config.Quiet {
if !u.Quiet {
_ = log.Output(callDepth, fmt.Sprintf("[INFO] "+m, v...))
}
}
// LogDebugf prints a debug log entry if debug is true and quiet is false.
func (u *UnifiPoller) LogDebugf(m string, v ...interface{}) {
if u.Config.Debug && !u.Config.Quiet {
if u.Debug && !u.Quiet {
_ = log.Output(callDepth, fmt.Sprintf("[DEBUG] "+m, v...))
}
}

72
pkg/poller/outputs.go Normal file
View File

@ -0,0 +1,72 @@
package poller
import (
"fmt"
"sync"
)
var (
outputs []*Output
outputSync sync.Mutex
)
// Collect is passed into output packages so they may collect metrics to output.
// Output packages must implement this interface.
type Collect interface {
Metrics() (*Metrics, bool, error)
MetricsFrom(*Filter) (*Metrics, bool, error)
Logger
}
// Output defines the output data for a metric exporter like influx or prometheus.
// Output packages should call NewOutput with this struct in init().
type Output struct {
Name string
Config interface{} // Each config is passed into an unmarshaller later.
Method func(Collect) error // Called on startup for each configured output.
}
// NewOutput should be called by each output package's init function.
func NewOutput(o *Output) {
outputSync.Lock()
defer outputSync.Unlock()
if o == nil || o.Method == nil {
panic("nil output or method passed to poller.NewOutput")
}
outputs = append(outputs, o)
}
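A matching sketch for the output side; names are invented, and note that InitializeOutputs below treats a returning Method as a stopped plugin, so a real output blocks for its whole lifetime.

package exampleoutput

import (
	"time"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
)

// Config is this hypothetical output's config-file section.
type Config struct {
	Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
}

func init() {
	c := &Config{}
	poller.NewOutput(&poller.Output{
		Name:   "example",
		Config: c, // filled in later by the config unmarshaller.
		Method: func(p poller.Collect) error { return run(p, c) },
	})
}

// run logs a metric summary every 30 seconds. It blocks forever on purpose.
func run(p poller.Collect, c *Config) error {
	if c.Disable {
		return nil
	}
	for range time.NewTicker(30 * time.Second).C {
		m, ok, err := p.Metrics()
		if err != nil {
			p.LogErrorf("collecting metrics: %v", err)
			continue
		}
		if ok {
			p.Logf("example output saw %d clients and %d site(s)", len(m.Clients), len(m.Sites))
		}
	}
	return nil
}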
// InitializeOutputs runs all the configured output plugins.
// If none exist, or they all exit, an error is returned.
func (u *UnifiPoller) InitializeOutputs() error {
v := make(chan error)
defer close(v)
var count int
for _, o := range outputs {
count++
go func(o *Output) {
v <- o.Method(u)
}(o)
}
if count < 1 {
return fmt.Errorf("no output plugins imported")
}
for err := range v {
if err != nil {
return err
}
if count--; count < 1 {
return fmt.Errorf("all output plugins have stopped, or none enabled")
}
}
return nil
}

View File

@ -1,70 +0,0 @@
package poller
import (
"net/http"
"strings"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
"github.com/davidnewhall/unifi-poller/pkg/promunifi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
)
const oneDecimalPoint = 10
// RunPrometheus starts the web server and registers the collector.
func (u *UnifiPoller) RunPrometheus() error {
u.Logf("Exporting Measurements for Prometheus at https://%s/metrics", u.Config.HTTPListen)
http.Handle("/metrics", promhttp.Handler())
ns := strings.Replace(u.Config.Namespace, "-", "", -1)
prometheus.MustRegister(promunifi.NewUnifiCollector(promunifi.UnifiCollectorCnfg{
Namespace: ns,
CollectFn: u.ExportMetrics,
LoggingFn: u.LogExportReport,
ReportErrors: true, // XXX: Does this need to be configurable?
}))
version.Version = Version
prometheus.MustRegister(version.NewCollector(ns))
return http.ListenAndServe(u.Config.HTTPListen, nil)
}
// ExportMetrics updates the internal metrics provided via
// HTTP at /metrics for prometheus collection.
// This is run by Prometheus as CollectFn.
func (u *UnifiPoller) ExportMetrics() (*metrics.Metrics, error) {
m, err := u.CollectMetrics()
if err != nil {
u.LogErrorf("collecting metrics: %v", err)
u.Logf("Re-authenticating to UniFi Controller")
if err := u.GetUnifi(); err != nil {
u.LogErrorf("re-authenticating: %v", err)
return nil, err
}
if m, err = u.CollectMetrics(); err != nil {
u.LogErrorf("collecting metrics: %v", err)
return nil, err
}
}
u.AugmentMetrics(m)
return m, nil
}
// LogExportReport is called after prometheus exports metrics.
// This is run by Prometheus as LoggingFn
func (u *UnifiPoller) LogExportReport(report *promunifi.Report) {
m := report.Metrics
u.Logf("UniFi Measurements Exported. Site: %d, Client: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, Descs: %d, "+
"Metrics: %d, Errs: %d, 0s: %d, Reqs/Total: %v / %v",
len(m.Sites), len(m.Clients), len(m.UAPs), len(m.UDMs)+len(m.USGs), len(m.USWs),
report.Descs, report.Total, report.Errors, report.Zeros,
report.Fetch.Round(time.Millisecond/oneDecimalPoint),
report.Elapsed.Round(time.Millisecond/oneDecimalPoint))
}

View File

@ -5,34 +5,14 @@ import (
"fmt"
"log"
"os"
"strings"
"time"
"github.com/prometheus/common/version"
"github.com/spf13/pflag"
)
// New returns a new poller struct preloaded with default values.
// No need to call this if you call Start.c
// New returns a new poller struct.
func New() *UnifiPoller {
return &UnifiPoller{
Config: &Config{
InfluxURL: defaultInfluxURL,
InfluxUser: defaultInfluxUser,
InfluxPass: defaultInfluxPass,
InfluxDB: defaultInfluxDB,
UnifiUser: defaultUnifiUser,
UnifiPass: "",
UnifiBase: defaultUnifiURL,
Interval: Duration{defaultInterval},
Sites: []string{"all"},
SaveSites: true,
HTTPListen: defaultHTTPListen,
Namespace: appName,
},
Flag: &Flag{
ConfigFile: DefaultConfFile,
},
}
return &UnifiPoller{Config: &Config{Poller: &Poller{}}, Flags: &Flags{}}
}
// Start begins the application from a CLI.
@ -41,46 +21,30 @@ func New() *UnifiPoller {
func (u *UnifiPoller) Start() error {
log.SetOutput(os.Stdout)
log.SetFlags(log.LstdFlags)
u.Flag.Parse(os.Args[1:])
u.Flags.Parse(os.Args[1:])
if u.Flag.ShowVer {
fmt.Printf("%s v%s\n", appName, Version)
if u.Flags.ShowVer {
fmt.Printf("%s v%s\n", AppName, version.Version)
return nil // don't run anything else w/ version request.
}
if u.Flag.DumpJSON == "" { // do not print this when dumping JSON.
u.Logf("Loading Configuration File: %s", u.Flag.ConfigFile)
if u.Flags.DumpJSON == "" { // do not print this when dumping JSON.
u.Logf("Loading Configuration File: %s", u.Flags.ConfigFile)
}
// Parse config file.
if err := u.Config.ParseFile(u.Flag.ConfigFile); err != nil {
u.Flag.Usage()
// Parse config file and ENV variables.
if err := u.ParseConfigs(); err != nil {
return err
}
// Update Config with ENV variable overrides.
if err := u.Config.ParseENV(); err != nil {
return err
}
if u.Flag.DumpJSON != "" {
return u.DumpJSONPayload()
}
if u.Config.Debug {
log.SetFlags(log.Lshortfile | log.Lmicroseconds | log.Ldate)
u.LogDebugf("Debug Logging Enabled")
}
log.Printf("[INFO] UniFi Poller v%v Starting Up! PID: %d", Version, os.Getpid())
return u.Run()
}
// Parse turns CLI arguments into data structures. Called by Start() on startup.
func (f *Flag) Parse(args []string) {
f.FlagSet = pflag.NewFlagSet(appName, pflag.ExitOnError)
func (f *Flags) Parse(args []string) {
f.FlagSet = pflag.NewFlagSet(AppName, pflag.ExitOnError)
f.Usage = func() {
fmt.Printf("Usage: %s [--config=/path/to/up.conf] [--version]", appName)
fmt.Printf("Usage: %s [--config=/path/to/up.conf] [--version]", AppName)
f.PrintDefaults()
}
@ -96,44 +60,24 @@ func (f *Flag) Parse(args []string) {
// 2. Run the collector one time and report the metrics to influxdb. (lambda)
// 3. Start a web server and wait for Prometheus to poll the application for metrics.
func (u *UnifiPoller) Run() error {
switch err := u.GetUnifi(); err {
case nil:
u.Logf("Polling UniFi Controller at %s v%s as user %s. Sites: %v",
u.Config.UnifiBase, u.Unifi.ServerVersion, u.Config.UnifiUser, u.Config.Sites)
default:
u.LogErrorf("Controller Auth or Connection failed, but continuing to retry! %v", err)
}
switch strings.ToLower(u.Config.Mode) {
default:
u.PollController()
return nil
case "influxlambda", "lambdainflux", "lambda_influx", "influx_lambda":
u.LastCheck = time.Now()
return u.CollectAndProcess()
case "both":
go u.PollController()
fallthrough
case "prometheus", "exporter":
return u.RunPrometheus()
}
}
// PollController runs forever, polling UniFi and pushing to InfluxDB
// This is started by Run() or RunBoth() after everything checks out.
func (u *UnifiPoller) PollController() {
interval := u.Config.Interval.Round(time.Second)
log.Printf("[INFO] Everything checks out! Poller started, InfluxDB interval: %v", interval)
ticker := time.NewTicker(interval)
for u.LastCheck = range ticker.C {
if err := u.CollectAndProcess(); err != nil {
u.LogErrorf("%v", err)
if u.Unifi != nil {
u.Unifi.CloseIdleConnections()
u.Unifi = nil // trigger re-auth in unifi.go.
}
if u.Flags.DumpJSON != "" {
if err := u.InitializeInputs(); err != nil {
return err
}
return u.DumpJSONPayload()
}
if u.Debug {
log.SetFlags(log.Lshortfile | log.Lmicroseconds | log.Ldate)
u.LogDebugf("Debug Logging Enabled")
}
log.Printf("[INFO] UniFi Poller v%v Starting Up! PID: %d", version.Version, os.Getpid())
if err := u.InitializeInputs(); err != nil {
return err
}
return u.InitializeOutputs()
}
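For completeness, a sketch of the binary-side wiring that this startup path implies. The blank imports assume the influx and prometheus packages register themselves in init() the same way inputunifi does; the real main package is not part of this excerpt.

package main

import (
	"log"

	"github.com/davidnewhall/unifi-poller/pkg/poller"

	// Plugins register themselves with poller.NewInput / poller.NewOutput in init().
	_ "github.com/davidnewhall/unifi-poller/pkg/influxunifi"
	_ "github.com/davidnewhall/unifi-poller/pkg/inputunifi"
	_ "github.com/davidnewhall/unifi-poller/pkg/promunifi"
)

func main() {
	if err := poller.New().Start(); err != nil {
		log.Fatalln("[ERROR]", err)
	}
}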

View File

@ -1,181 +0,0 @@
package poller
import (
"fmt"
"strings"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
"golift.io/unifi"
)
// GetUnifi returns a UniFi controller interface.
func (u *UnifiPoller) GetUnifi() (err error) {
u.Lock()
defer u.Unlock()
if u.Unifi != nil {
u.Unifi.CloseIdleConnections()
}
// Create an authenticated session to the Unifi Controller.
u.Unifi, err = unifi.NewUnifi(&unifi.Config{
User: u.Config.UnifiUser,
Pass: u.Config.UnifiPass,
URL: u.Config.UnifiBase,
VerifySSL: u.Config.VerifySSL,
ErrorLog: u.LogErrorf, // Log all errors.
DebugLog: u.LogDebugf, // Log debug messages.
})
if err != nil {
u.Unifi = nil
return fmt.Errorf("unifi controller: %v", err)
}
u.LogDebugf("Authenticated with controller successfully")
return u.CheckSites()
}
// CheckSites makes sure the list of provided sites exists on the controller.
// This does not run in Lambda (run-once) mode.
func (u *UnifiPoller) CheckSites() error {
if strings.Contains(strings.ToLower(u.Config.Mode), "lambda") {
return nil // Skip this in lambda mode.
}
u.LogDebugf("Checking Controller Sites List")
sites, err := u.Unifi.GetSites()
if err != nil {
return err
}
msg := []string{}
for _, site := range sites {
msg = append(msg, site.Name+" ("+site.Desc+")")
}
u.Logf("Found %d site(s) on controller: %v", len(msg), strings.Join(msg, ", "))
if StringInSlice("all", u.Config.Sites) {
u.Config.Sites = []string{"all"}
return nil
}
FIRST:
for _, s := range u.Config.Sites {
for _, site := range sites {
if s == site.Name {
continue FIRST
}
}
// This is fine, it may get added later.
u.LogErrorf("configured site not found on controller: %v", s)
}
return nil
}
// CollectMetrics grabs all the measurements from a UniFi controller and returns them.
func (u *UnifiPoller) CollectMetrics() (*metrics.Metrics, error) {
var err error
if u.Unifi == nil || u.Config.ReAuth {
// Some users need to re-auth every interval because the cookie times out.
// Sometimes we hit this path when the controller dies.
u.Logf("Re-authenticating to UniFi Controller")
if err := u.GetUnifi(); err != nil {
return nil, err
}
}
m := &metrics.Metrics{TS: u.LastCheck} // At this point, it's the Current Check.
// Get the sites we care about.
if m.Sites, err = u.GetFilteredSites(); err != nil {
return m, fmt.Errorf("unifi.GetSites(): %v", err)
}
if u.Config.SaveIDS {
m.IDSList, err = u.Unifi.GetIDS(m.Sites, time.Now().Add(u.Config.Interval.Duration), time.Now())
return m, fmt.Errorf("unifi.GetIDS(): %v", err)
}
// Get all the points.
if m.Clients, err = u.Unifi.GetClients(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetClients(): %v", err)
}
if m.Devices, err = u.Unifi.GetDevices(m.Sites); err != nil {
return m, fmt.Errorf("unifi.GetDevices(): %v", err)
}
return m, nil
}
// AugmentMetrics is our middleware layer between collecting metrics and writing them.
// This is where we can manipuate the returned data or make arbitrary decisions.
// This function currently adds parent device names to client metrics.
func (u *UnifiPoller) AugmentMetrics(metrics *metrics.Metrics) {
if metrics == nil || metrics.Devices == nil || metrics.Clients == nil {
return
}
devices := make(map[string]string)
bssdIDs := make(map[string]string)
for _, r := range metrics.UAPs {
devices[r.Mac] = r.Name
for _, v := range r.VapTable {
bssdIDs[v.Bssid] = fmt.Sprintf("%s %s %s:", r.Name, v.Radio, v.RadioName)
}
}
for _, r := range metrics.USGs {
devices[r.Mac] = r.Name
}
for _, r := range metrics.USWs {
devices[r.Mac] = r.Name
}
for _, r := range metrics.UDMs {
devices[r.Mac] = r.Name
}
// These come blank, so set them here.
for i, c := range metrics.Clients {
metrics.Clients[i].SwName = devices[c.SwMac]
metrics.Clients[i].ApName = devices[c.ApMac]
metrics.Clients[i].GwName = devices[c.GwMac]
metrics.Clients[i].RadioDescription = bssdIDs[metrics.Clients[i].Bssid] + metrics.Clients[i].RadioProto
}
if !u.Config.SaveSites {
metrics.Sites = nil
}
}
// GetFilteredSites returns a list of sites to fetch data for.
// Omits requested but unconfigured sites. Grabs the full list from the
// controller and returns the sites provided in the config file.
func (u *UnifiPoller) GetFilteredSites() (unifi.Sites, error) {
var i int
sites, err := u.Unifi.GetSites()
if err != nil {
return nil, err
} else if len(u.Config.Sites) < 1 || StringInSlice("all", u.Config.Sites) {
return sites, nil
}
for _, s := range sites {
// Only include valid sites in the request filter.
if StringInSlice(s.Name, u.Config.Sites) {
sites[i] = s
i++
}
}
return sites[:i], nil
}

View File

@ -6,43 +6,44 @@ import (
)
type uclient struct {
Anomalies *prometheus.Desc
BytesR *prometheus.Desc
CCQ *prometheus.Desc
Satisfaction *prometheus.Desc
Noise *prometheus.Desc
RoamCount *prometheus.Desc
RSSI *prometheus.Desc
RxBytes *prometheus.Desc
RxBytesR *prometheus.Desc
RxPackets *prometheus.Desc
RxRate *prometheus.Desc
Signal *prometheus.Desc
TxBytes *prometheus.Desc
TxBytesR *prometheus.Desc
TxPackets *prometheus.Desc
TxRetries *prometheus.Desc
TxPower *prometheus.Desc
TxRate *prometheus.Desc
Uptime *prometheus.Desc
WifiTxAttempts *prometheus.Desc
WiredRxBytes *prometheus.Desc
WiredRxBytesR *prometheus.Desc
WiredRxPackets *prometheus.Desc
WiredTxBytes *prometheus.Desc
WiredTxBytesR *prometheus.Desc
WiredTxPackets *prometheus.Desc
DpiStatsApp *prometheus.Desc
DpiStatsCat *prometheus.Desc
DpiStatsRxBytes *prometheus.Desc
DpiStatsRxPackets *prometheus.Desc
DpiStatsTxBytes *prometheus.Desc
DpiStatsTxPackets *prometheus.Desc
Anomalies *prometheus.Desc
BytesR *prometheus.Desc
CCQ *prometheus.Desc
Satisfaction *prometheus.Desc
Noise *prometheus.Desc
RoamCount *prometheus.Desc
RSSI *prometheus.Desc
RxBytes *prometheus.Desc
RxBytesR *prometheus.Desc
RxPackets *prometheus.Desc
RxRate *prometheus.Desc
Signal *prometheus.Desc
TxBytes *prometheus.Desc
TxBytesR *prometheus.Desc
TxPackets *prometheus.Desc
TxRetries *prometheus.Desc
TxPower *prometheus.Desc
TxRate *prometheus.Desc
Uptime *prometheus.Desc
WifiTxAttempts *prometheus.Desc
WiredRxBytes *prometheus.Desc
WiredRxBytesR *prometheus.Desc
WiredRxPackets *prometheus.Desc
WiredTxBytes *prometheus.Desc
WiredTxBytesR *prometheus.Desc
WiredTxPackets *prometheus.Desc
DPITxPackets *prometheus.Desc
DPIRxPackets *prometheus.Desc
DPITxBytes *prometheus.Desc
DPIRxBytes *prometheus.Desc
}
func descClient(ns string) *uclient {
labels := []string{"name", "mac", "site_name", "gw_name", "sw_name", "vlan", "ip", "oui", "network", "sw_port", "ap_name", "wired"}
labels := []string{"name", "mac", "site_name", "gw_name", "sw_name", "vlan",
"ip", "oui", "network", "sw_port", "ap_name", "source", "wired"}
labelW := append([]string{"radio_name", "radio", "radio_proto", "channel", "essid", "bssid", "radio_desc"}, labels...)
labelDPI := []string{"name", "mac", "site_name", "source", "category", "application"}
return &uclient{
Anomalies: prometheus.NewDesc(ns+"anomalies", "Client Anomalies", labelW, nil),
BytesR: prometheus.NewDesc(ns+"transfer_rate_bytes", "Client Data Rate", labelW, nil),
@ -63,25 +64,39 @@ func descClient(ns string) *uclient {
TxPower: prometheus.NewDesc(ns+"radio_transmit_power_dbm", "Client Transmit Power", labelW, nil),
TxRate: prometheus.NewDesc(ns+"radio_transmit_rate_bps", "Client Transmit Rate", labelW, nil),
WifiTxAttempts: prometheus.NewDesc(ns+"wifi_attempts_transmit_total", "Client Wifi Transmit Attempts", labelW, nil),
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Client Uptime", labelW, nil), // XXX: re-purpose for info tags.
/* needs more "looking into"
DpiStatsApp: prometheus.NewDesc(ns+"dpi_stats_app", "Client DPI Stats App", labels, nil),
DpiStatsCat: prometheus.NewDesc(ns+"dpi_stats_cat", "Client DPI Stats Cat", labels, nil),
DpiStatsRxBytes: prometheus.NewDesc(ns+"dpi_stats_receive_bytes_total", "Client DPI Stats Receive Bytes", labels, nil),
DpiStatsRxPackets: prometheus.NewDesc(ns+"dpi_stats_receive_packets_total", "Client DPI Stats Receive Packets", labels, nil),
DpiStatsTxBytes: prometheus.NewDesc(ns+"dpi_stats_transmit_bytes_total", "Client DPI Stats Transmit Bytes", labels, nil),
DpiStatsTxPackets: prometheus.NewDesc(ns+"dpi_stats_transmit_packets_total", "Client DPI Stats Transmit Packets", labels, nil),
*/
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Client Uptime", labelW, nil),
DPITxPackets: prometheus.NewDesc(ns+"dpi_transmit_packets", "Client DPI Transmit Packets", labelDPI, nil),
DPIRxPackets: prometheus.NewDesc(ns+"dpi_receive_packets", "Client DPI Receive Packets", labelDPI, nil),
DPITxBytes: prometheus.NewDesc(ns+"dpi_transmit_bytes", "Client DPI Transmit Bytes", labelDPI, nil),
DPIRxBytes: prometheus.NewDesc(ns+"dpi_receive_bytes", "Client DPI Receive Bytes", labelDPI, nil),
}
}
func (u *promUnifi) exportClientDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
labelDPI := []string{s.Name, s.MAC, s.SiteName, s.SourceName,
unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App)}
// log.Println(labelDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
r.send([]*metric{
{u.Client.DPITxPackets, gauge, dpi.TxPackets, labelDPI},
{u.Client.DPIRxPackets, gauge, dpi.RxPackets, labelDPI},
{u.Client.DPITxBytes, gauge, dpi.TxBytes, labelDPI},
{u.Client.DPIRxBytes, gauge, dpi.RxBytes, labelDPI},
})
}
}
func (u *promUnifi) exportClient(r report, c *unifi.Client) {
labels := []string{c.Name, c.Mac, c.SiteName, c.GwName, c.SwName, c.Vlan.Txt, c.IP, c.Oui, c.Network, c.SwPort.Txt, c.ApName, ""}
labelW := append([]string{c.RadioName, c.Radio, c.RadioProto, c.Channel.Txt, c.Essid, c.Bssid, c.RadioDescription}, labels...)
labels := []string{c.Name, c.Mac, c.SiteName, c.GwName, c.SwName, c.Vlan.Txt,
c.IP, c.Oui, c.Network, c.SwPort.Txt, c.ApName, c.SourceName, ""}
labelW := append([]string{c.RadioName, c.Radio, c.RadioProto, c.Channel.Txt,
c.Essid, c.Bssid, c.RadioDescription}, labels...)
if c.IsWired.Val {
labels[len(labels)-1] = "true"
labelW[len(labelW)-1] = "true"
r.send([]*metric{
{u.Client.RxBytes, counter, c.WiredRxBytes, labels},
{u.Client.RxBytesR, gauge, c.WiredRxBytesR, labels},
@ -93,6 +108,7 @@ func (u *promUnifi) exportClient(r report, c *unifi.Client) {
} else {
labels[len(labels)-1] = "false"
labelW[len(labelW)-1] = "false"
r.send([]*metric{
{u.Client.Anomalies, counter, c.Anomalies, labelW},
{u.Client.CCQ, gauge, float64(c.Ccq) / 1000.0, labelW},
@ -115,13 +131,6 @@ func (u *promUnifi) exportClient(r report, c *unifi.Client) {
{u.Client.BytesR, gauge, c.BytesR, labelW},
})
}
r.send([]*metric{{u.Client.Uptime, gauge, c.Uptime, labelW}})
/* needs more "looking into"
{u.Client.DpiStatsApp, gauge, c.DpiStats.App, labels},
{u.Client.DpiStatsCat, gauge, c.DpiStats.Cat, labels},
{u.Client.DpiStatsRxBytes, counter, c.DpiStats.RxBytes, labels},
{u.Client.DpiStatsRxPackets, counter, c.DpiStats.RxPackets, labels},
{u.Client.DpiStatsTxBytes, counter, c.DpiStats.TxBytes, labels},
{u.Client.DpiStatsTxPackets, counter, c.DpiStats.TxPackets, labels},
*/
}

View File

@ -1,50 +1,58 @@
// Package promunifi provides the bridge between unifi metrics and prometheus.
// Package promunifi provides the bridge between unifi-poller metrics and prometheus.
package promunifi
import (
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
"golift.io/unifi"
)
// channel buffer, fits at least one batch.
const buffer = 50
const (
// channel buffer, fits at least one batch.
defaultBuffer = 50
defaultHTTPListen = "0.0.0.0:9130"
// simply fewer letters.
counter = prometheus.CounterValue
gauge = prometheus.GaugeValue
)
// simply fewer letters.
const counter = prometheus.CounterValue
const gauge = prometheus.GaugeValue
type promUnifi struct {
*Config `json:"prometheus" toml:"prometheus" xml:"prometheus" yaml:"prometheus"`
Client *uclient
Device *unifiDevice
UAP *uap
USG *usg
USW *usw
Site *site
// This interface is passed to the Collect() method. The Collect method uses
// this interface to retrieve the latest UniFi measurements and export them.
Collector poller.Collect
}
// UnifiCollectorCnfg defines the data needed to collect and report UniFi Metrics.
type UnifiCollectorCnfg struct {
// Config is the input (config file) data used to initialize this output plugin.
type Config struct {
// If non-empty, each of the collected metrics is prefixed by the
// provided string and an underscore ("_").
Namespace string
Namespace string `json:"namespace" toml:"namespace" xml:"namespace" yaml:"namespace"`
HTTPListen string `json:"http_listen" toml:"http_listen" xml:"http_listen" yaml:"http_listen"`
// If true, any error encountered during collection is reported as an
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
// and the collected metrics will be incomplete. Possibly, no metrics
// will be collected at all.
ReportErrors bool
// This function is passed to the Collect() method. The Collect method runs
// this function to retrieve the latest UniFi measurements and export them.
CollectFn func() (*metrics.Metrics, error)
// Provide a logger function if you want to run a routine *after* prometheus checks in.
LoggingFn func(*Report)
}
type promUnifi struct {
Config UnifiCollectorCnfg
Client *uclient
Device *unifiDevice
UAP *uap
USG *usg
USW *usw
Site *site
ReportErrors bool `json:"report_errors" toml:"report_errors" xml:"report_errors" yaml:"report_errors"`
Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
// Buffer is the channel buffer size for collected metrics.
// Defaults to 50 when unset; that keeps exports fast, but try a smaller value if CPU usage is a concern.
Buffer int `json:"buffer" toml:"buffer" xml:"buffer" yaml:"buffer"`
}
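The zero values in this Config are filled in by Run() below; for quick reference, here is a minimal sketch of that defaulting, reusing the constants declared at the top of this file (the "unifipoller" fallback assumes poller.AppName is "unifi-poller"):
```
// Sketch only: mirrors the fallback logic Run() applies to an empty Config.
func applyDefaults(c *Config) {
	c.Namespace = strings.Trim(strings.Replace(c.Namespace, "-", "_", -1), "_")
	if c.Namespace == "" {
		c.Namespace = "unifipoller" // poller.AppName with the dash removed (assumption).
	}
	if c.HTTPListen == "" {
		c.HTTPListen = defaultHTTPListen // "0.0.0.0:9130"
	}
	if c.Buffer == 0 {
		c.Buffer = defaultBuffer // 50
	}
}
```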
type metric struct {
@ -54,39 +62,117 @@ type metric struct {
Labels []string
}
// Report is passed into LoggingFn to log the export metrics to stdout (outside this package).
// Report accumulates counters that are printed to a log line.
type Report struct {
Total int // Total count of metrics recorded.
Errors int // Total count of errors recording metrics.
Zeros int // Total count of metrics equal to zero.
Descs int // Total count of unique metrics descriptions.
Metrics *metrics.Metrics // Metrics collected and recorded.
Elapsed time.Duration // Duration elapsed collecting and exporting.
Fetch time.Duration // Duration elapsed making controller requests.
Start time.Time // Time collection began.
*Config
Total int // Total count of metrics recorded.
Errors int // Total count of errors recording metrics.
Zeros int // Total count of metrics equal to zero.
Metrics *poller.Metrics // Metrics collected and recorded.
Elapsed time.Duration // Duration elapsed collecting and exporting.
Fetch time.Duration // Duration elapsed making controller requests.
Start time.Time // Time collection began.
ch chan []*metric
wg sync.WaitGroup
cf UnifiCollectorCnfg
}
// NewUnifiCollector returns a prometheus collector that will export any available
// UniFi metrics. You must provide a collection function in the opts.
func NewUnifiCollector(opts UnifiCollectorCnfg) prometheus.Collector {
if opts.CollectFn == nil {
panic("nil collector function")
// target is used for targeted (sometimes dynamic) metrics scrapes.
type target struct {
*poller.Filter
u *promUnifi
}
func init() {
u := &promUnifi{Config: &Config{}}
poller.NewOutput(&poller.Output{
Name: "prometheus",
Config: u,
Method: u.Run,
})
}
// Run creates the collectors and starts the web server.
// It should be run in a goroutine. Returns nil immediately if this output is disabled.
func (u *promUnifi) Run(c poller.Collect) error {
if u.Disable {
return nil
}
if opts.Namespace = strings.Trim(opts.Namespace, "_") + "_"; opts.Namespace == "_" {
opts.Namespace = ""
u.Namespace = strings.Trim(strings.Replace(u.Namespace, "-", "_", -1), "_")
if u.Namespace == "" {
u.Namespace = strings.Replace(poller.AppName, "-", "", -1)
}
return &promUnifi{
Config: opts,
Client: descClient(opts.Namespace + "client_"),
Device: descDevice(opts.Namespace + "device_"), // stats for all device types.
UAP: descUAP(opts.Namespace + "device_"),
USG: descUSG(opts.Namespace + "device_"),
USW: descUSW(opts.Namespace + "device_"),
Site: descSite(opts.Namespace + "site_"),
if u.HTTPListen == "" {
u.HTTPListen = defaultHTTPListen
}
if u.Buffer == 0 {
u.Buffer = defaultBuffer
}
// This could later be passed in from poller by adding a method to the interface.
u.Collector = c
u.Client = descClient(u.Namespace + "_client_")
u.Device = descDevice(u.Namespace + "_device_") // stats for all device types.
u.UAP = descUAP(u.Namespace + "_device_")
u.USG = descUSG(u.Namespace + "_device_")
u.USW = descUSW(u.Namespace + "_device_")
u.Site = descSite(u.Namespace + "_site_")
mux := http.NewServeMux()
prometheus.MustRegister(version.NewCollector(u.Namespace))
prometheus.MustRegister(u)
c.Logf("Prometheus exported at https://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
mux.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer,
promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
))
mux.HandleFunc("/scrape", u.ScrapeHandler)
mux.HandleFunc("/", u.DefaultHandler)
return http.ListenAndServe(u.HTTPListen, mux)
}
// ScrapeHandler allows prometheus to scrape a single source, instead of all sources.
func (u *promUnifi) ScrapeHandler(w http.ResponseWriter, r *http.Request) {
t := &target{u: u, Filter: &poller.Filter{
Name: r.URL.Query().Get("input"), // "unifi"
Path: r.URL.Query().Get("path"), // url: "https://127.0.0.1:8443"
Role: r.URL.Query().Get("role"), // configured role in up.conf.
}}
if t.Name == "" {
u.Collector.LogErrorf("input parameter missing on scrape from %v", r.RemoteAddr)
http.Error(w, `'input' parameter must be specified (try "unifi")`, 400)
return
}
if t.Role == "" && t.Path == "" {
u.Collector.LogErrorf("role and path parameters missing on scrape from %v", r.RemoteAddr)
http.Error(w, "'role' OR 'path' parameter must be specified: configured role OR unconfigured url", 400)
return
}
registry := prometheus.NewRegistry()
registry.MustRegister(t)
promhttp.HandlerFor(
registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
).ServeHTTP(w, r)
}
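Given the query parameters checked above, a targeted scrape can also be requested by hand. A minimal sketch of such a request in Go follows; the listen address and role value are placeholders, not part of this changeset:
```
// Sketch: ask the /scrape endpoint for metrics from a single controller.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("input", "unifi")       // required: name of the input plugin.
	q.Set("role", "home network") // a role from up.conf, or use "path" for an unconfigured URL.

	resp, err := http.Get("http://127.0.0.1:9130/scrape?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // Prometheus exposition text for that one source.
}
```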
func (u *promUnifi) DefaultHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
_, _ = w.Write([]byte(poller.AppName + "\n"))
}
// Describe satisfies the prometheus Collector. This returns all of the
// metric descriptions that this package produces.
func (t *target) Describe(ch chan<- *prometheus.Desc) {
t.u.Describe(ch)
}
// Describe satisfies the prometheus Collector. This returns all of the
@ -94,6 +180,7 @@ func NewUnifiCollector(opts UnifiCollectorCnfg) prometheus.Collector {
func (u *promUnifi) Describe(ch chan<- *prometheus.Desc) {
for _, f := range []interface{}{u.Client, u.Device, u.UAP, u.USG, u.USW, u.Site} {
v := reflect.Indirect(reflect.ValueOf(f))
// Loop each struct member and send it to the provided channel.
for i := 0; i < v.NumField(); i++ {
desc, ok := v.Field(i).Interface().(*prometheus.Desc)
@ -104,18 +191,45 @@ func (u *promUnifi) Describe(ch chan<- *prometheus.Desc) {
}
}
// Collect satisfies the prometheus Collector. This runs for a single controller poll.
func (t *target) Collect(ch chan<- prometheus.Metric) {
t.u.collect(ch, t.Filter)
}
// Collect satisfies the prometheus Collector. This runs the input method to get
// the current metrics (from another package) then exports them for prometheus.
func (u *promUnifi) Collect(ch chan<- prometheus.Metric) {
u.collect(ch, nil)
}
func (u *promUnifi) collect(ch chan<- prometheus.Metric, filter *poller.Filter) {
var err error
r := &Report{cf: u.Config, ch: make(chan []*metric, buffer), Start: time.Now()}
r := &Report{
Config: u.Config,
ch: make(chan []*metric, u.Config.Buffer),
Start: time.Now()}
defer r.close()
if r.Metrics, err = r.cf.CollectFn(); err != nil {
r.error(ch, prometheus.NewInvalidDesc(fmt.Errorf("metric fetch failed")), err)
return
ok := false
if filter == nil {
r.Metrics, ok, err = u.Collector.Metrics()
} else {
r.Metrics, ok, err = u.Collector.MetricsFrom(filter)
}
r.Fetch = time.Since(r.Start)
if err != nil {
r.error(ch, prometheus.NewInvalidDesc(err), fmt.Errorf("metric fetch failed"))
u.Collector.LogErrorf("metric fetch failed: %v", err)
if !ok {
return
}
}
if r.Metrics.Devices == nil {
r.Metrics.Devices = &unifi.Devices{}
}
@ -130,10 +244,12 @@ func (u *promUnifi) Collect(ch chan<- prometheus.Metric) {
// This is where our channels connect to the prometheus channel.
func (u *promUnifi) exportMetrics(r report, ch chan<- prometheus.Metric, ourChan chan []*metric) {
descs := make(map[*prometheus.Desc]bool) // used as a counter
defer r.report(descs)
defer r.report(u.Collector, descs)
for newMetrics := range ourChan {
for _, m := range newMetrics {
descs[m.Desc] = true
switch v := m.Value.(type) {
case unifi.FlexInt:
ch <- r.export(m, v.Val)
@ -147,57 +263,84 @@ func (u *promUnifi) exportMetrics(r report, ch chan<- prometheus.Metric, ourChan
r.error(ch, m.Desc, fmt.Sprintf("not a number: %v", m.Value))
}
}
r.done()
}
}
func (u *promUnifi) loopExports(r report) {
m := r.metrics()
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.Sites {
u.exportSite(r, s)
}
}()
r.add()
go func() {
defer r.done()
for _, s := range m.SitesDPI {
u.exportSiteDPI(r, s)
}
}()
go func() {
defer r.done()
for _, c := range m.Clients {
u.exportClient(r, c)
}
}()
go func() {
defer r.done()
for _, c := range m.ClientsDPI {
u.exportClientDPI(r, c)
}
}()
go func() {
defer r.done()
for _, d := range m.UAPs {
u.exportUAP(r, d)
}
}()
r.add()
go func() {
defer r.done()
for _, d := range m.UDMs {
u.exportUDM(r, d)
}
}()
r.add()
go func() {
defer r.done()
for _, d := range m.USGs {
u.exportUSG(r, d)
}
}()
r.add()
go func() {
defer r.done()
for _, d := range m.USWs {
u.exportUSW(r, d)
}
}()
r.add()
go func() {
defer r.done()
for _, c := range m.Clients {
u.exportClient(r, c)
}
}()
}
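Every exporter goroutine above is tracked by the Report's WaitGroup: add() runs before the goroutine starts and done() when it finishes, which lets the metric channel close once all exporters return. A generic sketch of that fan-out pattern (names and data here are illustrative, not the package's types):
```
// Sketch of the add()/done() fan-out used by loopExports.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	ch := make(chan string, 50) // buffered, like the Report's metric channel.

	export := func(items []string) {
		defer wg.Done() // done() when this exporter finishes.
		for _, item := range items {
			ch <- item
		}
	}

	for _, batch := range [][]string{{"sites"}, {"clients"}, {"devices"}} {
		wg.Add(1) // add() before each goroutine starts.
		go export(batch)
	}

	go func() { wg.Wait(); close(ch) }() // close once every exporter is done.

	for m := range ch {
		fmt.Println(m) // stand-in for handing metrics to prometheus.
	}
}
```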

View File

@ -4,7 +4,7 @@ import (
"fmt"
"time"
"github.com/davidnewhall/unifi-poller/pkg/metrics"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/prometheus/client_golang/prometheus"
)
@ -16,14 +16,15 @@ type report interface {
add()
done()
send([]*metric)
metrics() *metrics.Metrics
report(descs map[*prometheus.Desc]bool)
metrics() *poller.Metrics
report(c poller.Collect, descs map[*prometheus.Desc]bool)
export(m *metric, v float64) prometheus.Metric
error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{})
}
// satisfy gomnd
const one = 1
const oneDecimalPoint = 10.0
func (r *Report) add() {
r.wg.Add(one)
@ -38,29 +39,35 @@ func (r *Report) send(m []*metric) {
r.ch <- m
}
func (r *Report) metrics() *metrics.Metrics {
func (r *Report) metrics() *poller.Metrics {
return r.Metrics
}
func (r *Report) report(descs map[*prometheus.Desc]bool) {
if r.cf.LoggingFn == nil {
return
}
r.Descs = len(descs)
r.cf.LoggingFn(r)
func (r *Report) report(c poller.Collect, descs map[*prometheus.Desc]bool) {
m := r.Metrics
c.Logf("UniFi Measurements Exported. Site: %d, Client: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, Descs: %d, "+
"Metrics: %d, Errs: %d, 0s: %d, Reqs/Total: %v / %v",
len(m.Sites), len(m.Clients), len(m.UAPs), len(m.UDMs)+len(m.USGs), len(m.USWs),
len(descs), r.Total, r.Errors, r.Zeros,
r.Fetch.Round(time.Millisecond/oneDecimalPoint),
r.Elapsed.Round(time.Millisecond/oneDecimalPoint))
}
func (r *Report) export(m *metric, v float64) prometheus.Metric {
r.Total++
if v == 0 {
r.Zeros++
}
return prometheus.MustNewConstMetric(m.Desc, m.ValueType, v, m.Labels...)
}
func (r *Report) error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{}) {
r.Errors++
if r.cf.ReportErrors {
if r.ReportErrors {
ch <- prometheus.NewInvalidMetric(d, fmt.Errorf("error: %v", v))
}
}

View File

@ -31,43 +31,67 @@ type site struct {
RemoteUserTxBytes *prometheus.Desc
RemoteUserRxPackets *prometheus.Desc
RemoteUserTxPackets *prometheus.Desc
DPITxPackets *prometheus.Desc
DPIRxPackets *prometheus.Desc
DPITxBytes *prometheus.Desc
DPIRxBytes *prometheus.Desc
}
func descSite(ns string) *site {
labels := []string{"subsystem", "status", "site_name"}
labels := []string{"subsystem", "status", "site_name", "source"}
labelDPI := []string{"category", "application", "site_name", "source"}
nd := prometheus.NewDesc
return &site{
NumUser: prometheus.NewDesc(ns+"users", "Number of Users", labels, nil),
NumGuest: prometheus.NewDesc(ns+"guests", "Number of Guests", labels, nil),
NumIot: prometheus.NewDesc(ns+"iots", "Number of IoT Devices", labels, nil),
TxBytesR: prometheus.NewDesc(ns+"transmit_rate_bytes", "Bytes Transmit Rate", labels, nil),
RxBytesR: prometheus.NewDesc(ns+"receive_rate_bytes", "Bytes Receive Rate", labels, nil),
NumAp: prometheus.NewDesc(ns+"aps", "Access Point Count", labels, nil),
NumAdopted: prometheus.NewDesc(ns+"adopted", "Adoption Count", labels, nil),
NumDisabled: prometheus.NewDesc(ns+"disabled", "Disabled Count", labels, nil),
NumDisconnected: prometheus.NewDesc(ns+"disconnected", "Disconnected Count", labels, nil),
NumPending: prometheus.NewDesc(ns+"pending", "Pending Count", labels, nil),
NumGw: prometheus.NewDesc(ns+"gateways", "Gateway Count", labels, nil),
NumSw: prometheus.NewDesc(ns+"switches", "Switch Count", labels, nil),
NumSta: prometheus.NewDesc(ns+"stations", "Station Count", labels, nil),
Latency: prometheus.NewDesc(ns+"latency_seconds", "Latency", labels, nil),
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Uptime", labels, nil),
Drops: prometheus.NewDesc(ns+"intenet_drops_total", "Internet (WAN) Disconnections", labels, nil),
XputUp: prometheus.NewDesc(ns+"xput_up_rate", "Speedtest Upload", labels, nil),
XputDown: prometheus.NewDesc(ns+"xput_down_rate", "Speedtest Download", labels, nil),
SpeedtestPing: prometheus.NewDesc(ns+"speedtest_ping", "Speedtest Ping", labels, nil),
RemoteUserNumActive: prometheus.NewDesc(ns+"remote_user_active", "Remote Users Active", labels, nil),
RemoteUserNumInactive: prometheus.NewDesc(ns+"remote_user_inactive", "Remote Users Inactive", labels, nil),
RemoteUserRxBytes: prometheus.NewDesc(ns+"remote_user_receive_bytes_total", "Remote Users Receive Bytes", labels, nil),
RemoteUserTxBytes: prometheus.NewDesc(ns+"remote_user_transmit_bytes_total", "Remote Users Transmit Bytes", labels, nil),
RemoteUserRxPackets: prometheus.NewDesc(ns+"remote_user_receive_packets_total", "Remote Users Receive Packets", labels, nil),
RemoteUserTxPackets: prometheus.NewDesc(ns+"remote_user_transmit_packets_total", "Remote Users Transmit Packets", labels, nil),
NumUser: nd(ns+"users", "Number of Users", labels, nil),
NumGuest: nd(ns+"guests", "Number of Guests", labels, nil),
NumIot: nd(ns+"iots", "Number of IoT Devices", labels, nil),
TxBytesR: nd(ns+"transmit_rate_bytes", "Bytes Transmit Rate", labels, nil),
RxBytesR: nd(ns+"receive_rate_bytes", "Bytes Receive Rate", labels, nil),
NumAp: nd(ns+"aps", "Access Point Count", labels, nil),
NumAdopted: nd(ns+"adopted", "Adoption Count", labels, nil),
NumDisabled: nd(ns+"disabled", "Disabled Count", labels, nil),
NumDisconnected: nd(ns+"disconnected", "Disconnected Count", labels, nil),
NumPending: nd(ns+"pending", "Pending Count", labels, nil),
NumGw: nd(ns+"gateways", "Gateway Count", labels, nil),
NumSw: nd(ns+"switches", "Switch Count", labels, nil),
NumSta: nd(ns+"stations", "Station Count", labels, nil),
Latency: nd(ns+"latency_seconds", "Latency", labels, nil),
Uptime: nd(ns+"uptime_seconds", "Uptime", labels, nil),
Drops: nd(ns+"intenet_drops_total", "Internet (WAN) Disconnections", labels, nil),
XputUp: nd(ns+"xput_up_rate", "Speedtest Upload", labels, nil),
XputDown: nd(ns+"xput_down_rate", "Speedtest Download", labels, nil),
SpeedtestPing: nd(ns+"speedtest_ping", "Speedtest Ping", labels, nil),
RemoteUserNumActive: nd(ns+"remote_user_active", "Remote Users Active", labels, nil),
RemoteUserNumInactive: nd(ns+"remote_user_inactive", "Remote Users Inactive", labels, nil),
RemoteUserRxBytes: nd(ns+"remote_user_receive_bytes_total", "Remote Users Receive Bytes", labels, nil),
RemoteUserTxBytes: nd(ns+"remote_user_transmit_bytes_total", "Remote Users Transmit Bytes", labels, nil),
RemoteUserRxPackets: nd(ns+"remote_user_receive_packets_total", "Remote Users Receive Packets", labels, nil),
RemoteUserTxPackets: nd(ns+"remote_user_transmit_packets_total", "Remote Users Transmit Packets", labels, nil),
DPITxPackets: nd(ns+"dpi_transmit_packets", "Site DPI Transmit Packets", labelDPI, nil),
DPIRxPackets: nd(ns+"dpi_receive_packets", "Site DPI Receive Packets", labelDPI, nil),
DPITxBytes: nd(ns+"dpi_transmit_bytes", "Site DPI Transmit Bytes", labelDPI, nil),
DPIRxBytes: nd(ns+"dpi_receive_bytes", "Site DPI Receive Bytes", labelDPI, nil),
}
}
func (u *promUnifi) exportSiteDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
labelDPI := []string{unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App), s.SiteName, s.SourceName}
// log.Println(labelsDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
r.send([]*metric{
{u.Site.DPITxPackets, gauge, dpi.TxPackets, labelDPI},
{u.Site.DPIRxPackets, gauge, dpi.RxPackets, labelDPI},
{u.Site.DPITxBytes, gauge, dpi.TxBytes, labelDPI},
{u.Site.DPIRxBytes, gauge, dpi.RxBytes, labelDPI},
})
}
}
func (u *promUnifi) exportSite(r report, s *unifi.Site) {
for _, h := range s.Health {
labels := []string{h.Subsystem, h.Status, s.SiteName}
switch h.Subsystem {
switch labels := []string{h.Subsystem, h.Status, s.SiteName, s.SourceName}; labels[0] {
case "www":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
@ -79,7 +103,6 @@ func (u *promUnifi) exportSite(r report, s *unifi.Site) {
{u.Site.SpeedtestPing, gauge, h.SpeedtestPing, labels},
{u.Site.Drops, counter, h.Drops, labels},
})
case "wlan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
@ -93,7 +116,6 @@ func (u *promUnifi) exportSite(r report, s *unifi.Site) {
{u.Site.NumAp, gauge, h.NumAp, labels},
{u.Site.NumDisabled, gauge, h.NumDisabled, labels},
})
case "wan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
@ -104,7 +126,6 @@ func (u *promUnifi) exportSite(r report, s *unifi.Site) {
{u.Site.NumGw, gauge, h.NumGw, labels},
{u.Site.NumSta, gauge, h.NumSta, labels},
})
case "lan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
@ -117,7 +138,6 @@ func (u *promUnifi) exportSite(r report, s *unifi.Site) {
{u.Site.NumIot, gauge, h.NumIot, labels},
{u.Site.NumSw, gauge, h.NumSw, labels},
})
case "vpn":
r.send([]*metric{
{u.Site.RemoteUserNumActive, gauge, h.RemoteUserNumActive, labels},

View File

@ -80,81 +80,83 @@ type uap struct {
}
func descUAP(ns string) *uap {
labelA := []string{"stat", "site_name", "name"} // stat + labels[1:]
labelV := []string{"vap_name", "bssid", "radio", "radio_name", "essid", "usage", "site_name", "name"}
labelR := []string{"radio_name", "radio", "site_name", "name"}
labelA := []string{"stat", "site_name", "name", "source"} // stat + labels[1:]
labelV := []string{"vap_name", "bssid", "radio", "radio_name", "essid", "usage", "site_name", "name", "source"}
labelR := []string{"radio_name", "radio", "site_name", "name", "source"}
nd := prometheus.NewDesc
return &uap{
// 3x each - stat table: total, guest, user
ApWifiTxDropped: prometheus.NewDesc(ns+"stat_wifi_transmt_dropped_total", "Wifi Transmissions Dropped", labelA, nil),
ApRxErrors: prometheus.NewDesc(ns+"stat_receive_errors_total", "Receive Errors", labelA, nil),
ApRxDropped: prometheus.NewDesc(ns+"stat_receive_dropped_total", "Receive Dropped", labelA, nil),
ApRxFrags: prometheus.NewDesc(ns+"stat_receive_frags_total", "Received Frags", labelA, nil),
ApRxCrypts: prometheus.NewDesc(ns+"stat_receive_crypts_total", "Receive Crypts", labelA, nil),
ApTxPackets: prometheus.NewDesc(ns+"stat_transmit_packets_total", "Transmit Packets", labelA, nil),
ApTxBytes: prometheus.NewDesc(ns+"stat_transmit_bytes_total", "Transmit Bytes", labelA, nil),
ApTxErrors: prometheus.NewDesc(ns+"stat_transmit_errors_total", "Transmit Errors", labelA, nil),
ApTxDropped: prometheus.NewDesc(ns+"stat_transmit_dropped_total", "Transmit Dropped", labelA, nil),
ApTxRetries: prometheus.NewDesc(ns+"stat_retries_tx_total", "Transmit Retries", labelA, nil),
ApRxPackets: prometheus.NewDesc(ns+"stat_receive_packets_total", "Receive Packets", labelA, nil),
ApRxBytes: prometheus.NewDesc(ns+"stat_receive_bytes_total", "Receive Bytes", labelA, nil),
WifiTxAttempts: prometheus.NewDesc(ns+"stat_wifi_transmit_attempts_total", "Wifi Transmission Attempts", labelA, nil),
MacFilterRejections: prometheus.NewDesc(ns+"stat_mac_filter_rejects_total", "MAC Filter Rejections", labelA, nil),
ApWifiTxDropped: nd(ns+"stat_wifi_transmt_dropped_total", "Wifi Transmissions Dropped", labelA, nil),
ApRxErrors: nd(ns+"stat_receive_errors_total", "Receive Errors", labelA, nil),
ApRxDropped: nd(ns+"stat_receive_dropped_total", "Receive Dropped", labelA, nil),
ApRxFrags: nd(ns+"stat_receive_frags_total", "Received Frags", labelA, nil),
ApRxCrypts: nd(ns+"stat_receive_crypts_total", "Receive Crypts", labelA, nil),
ApTxPackets: nd(ns+"stat_transmit_packets_total", "Transmit Packets", labelA, nil),
ApTxBytes: nd(ns+"stat_transmit_bytes_total", "Transmit Bytes", labelA, nil),
ApTxErrors: nd(ns+"stat_transmit_errors_total", "Transmit Errors", labelA, nil),
ApTxDropped: nd(ns+"stat_transmit_dropped_total", "Transmit Dropped", labelA, nil),
ApTxRetries: nd(ns+"stat_retries_tx_total", "Transmit Retries", labelA, nil),
ApRxPackets: nd(ns+"stat_receive_packets_total", "Receive Packets", labelA, nil),
ApRxBytes: nd(ns+"stat_receive_bytes_total", "Receive Bytes", labelA, nil),
WifiTxAttempts: nd(ns+"stat_wifi_transmit_attempts_total", "Wifi Transmission Attempts", labelA, nil),
MacFilterRejections: nd(ns+"stat_mac_filter_rejects_total", "MAC Filter Rejections", labelA, nil),
// N each - 1 per Virtual AP (VAP)
VAPCcq: prometheus.NewDesc(ns+"vap_ccq_ratio", "VAP Client Connection Quality", labelV, nil),
VAPMacFilterRejections: prometheus.NewDesc(ns+"vap_mac_filter_rejects_total", "VAP MAC Filter Rejections", labelV, nil),
VAPNumSatisfactionSta: prometheus.NewDesc(ns+"vap_satisfaction_stations", "VAP Number Satisifaction Stations", labelV, nil),
VAPAvgClientSignal: prometheus.NewDesc(ns+"vap_average_client_signal", "VAP Average Client Signal", labelV, nil),
VAPSatisfaction: prometheus.NewDesc(ns+"vap_satisfaction_ratio", "VAP Satisfaction", labelV, nil),
VAPSatisfactionNow: prometheus.NewDesc(ns+"vap_satisfaction_now_ratio", "VAP Satisfaction Now", labelV, nil),
VAPDNSAvgLatency: prometheus.NewDesc(ns+"vap_dns_latency_average_seconds", "VAP DNS Latency Average", labelV, nil),
VAPRxBytes: prometheus.NewDesc(ns+"vap_receive_bytes_total", "VAP Bytes Received", labelV, nil),
VAPRxCrypts: prometheus.NewDesc(ns+"vap_receive_crypts_total", "VAP Crypts Received", labelV, nil),
VAPRxDropped: prometheus.NewDesc(ns+"vap_receive_dropped_total", "VAP Dropped Received", labelV, nil),
VAPRxErrors: prometheus.NewDesc(ns+"vap_receive_errors_total", "VAP Errors Received", labelV, nil),
VAPRxFrags: prometheus.NewDesc(ns+"vap_receive_frags_total", "VAP Frags Received", labelV, nil),
VAPRxNwids: prometheus.NewDesc(ns+"vap_receive_nwids_total", "VAP Nwids Received", labelV, nil),
VAPRxPackets: prometheus.NewDesc(ns+"vap_receive_packets_total", "VAP Packets Received", labelV, nil),
VAPTxBytes: prometheus.NewDesc(ns+"vap_transmit_bytes_total", "VAP Bytes Transmitted", labelV, nil),
VAPTxDropped: prometheus.NewDesc(ns+"vap_transmit_dropped_total", "VAP Dropped Transmitted", labelV, nil),
VAPTxErrors: prometheus.NewDesc(ns+"vap_transmit_errors_total", "VAP Errors Transmitted", labelV, nil),
VAPTxPackets: prometheus.NewDesc(ns+"vap_transmit_packets_total", "VAP Packets Transmitted", labelV, nil),
VAPTxPower: prometheus.NewDesc(ns+"vap_transmit_power", "VAP Transmit Power", labelV, nil),
VAPTxRetries: prometheus.NewDesc(ns+"vap_transmit_retries_total", "VAP Retries Transmitted", labelV, nil),
VAPTxCombinedRetries: prometheus.NewDesc(ns+"vap_transmit_retries_combined_total", "VAP Retries Combined Transmitted", labelV, nil),
VAPTxDataMpduBytes: prometheus.NewDesc(ns+"vap_data_mpdu_transmit_bytes_total", "VAP Data MPDU Bytes Transmitted", labelV, nil),
VAPTxRtsRetries: prometheus.NewDesc(ns+"vap_transmit_rts_retries_total", "VAP RTS Retries Transmitted", labelV, nil),
VAPTxSuccess: prometheus.NewDesc(ns+"vap_transmit_success_total", "VAP Success Transmits", labelV, nil),
VAPTxTotal: prometheus.NewDesc(ns+"vap_transmit_total", "VAP Transmit Total", labelV, nil),
VAPTxGoodbytes: prometheus.NewDesc(ns+"vap_transmit_goodbyes", "VAP Goodbyes Transmitted", labelV, nil),
VAPTxLatAvg: prometheus.NewDesc(ns+"vap_transmit_latency_average_seconds", "VAP Latency Average Transmit", labelV, nil),
VAPTxLatMax: prometheus.NewDesc(ns+"vap_transmit_latency_maximum_seconds", "VAP Latency Maximum Transmit", labelV, nil),
VAPTxLatMin: prometheus.NewDesc(ns+"vap_transmit_latency_minimum_seconds", "VAP Latency Minimum Transmit", labelV, nil),
VAPRxGoodbytes: prometheus.NewDesc(ns+"vap_receive_goodbyes", "VAP Goodbyes Received", labelV, nil),
VAPRxLatAvg: prometheus.NewDesc(ns+"vap_receive_latency_average_seconds", "VAP Latency Average Receive", labelV, nil),
VAPRxLatMax: prometheus.NewDesc(ns+"vap_receive_latency_maximum_seconds", "VAP Latency Maximum Receive", labelV, nil),
VAPRxLatMin: prometheus.NewDesc(ns+"vap_receive_latency_minimum_seconds", "VAP Latency Minimum Receive", labelV, nil),
VAPWifiTxLatencyMovAvg: prometheus.NewDesc(ns+"vap_transmit_latency_moving_avg_seconds", "VAP Latency Moving Average Tramsit", labelV, nil),
VAPWifiTxLatencyMovMax: prometheus.NewDesc(ns+"vap_transmit_latency_moving_max_seconds", "VAP Latency Moving Maximum Tramsit", labelV, nil),
VAPWifiTxLatencyMovMin: prometheus.NewDesc(ns+"vap_transmit_latency_moving_min_seconds", "VAP Latency Moving Minimum Tramsit", labelV, nil),
VAPWifiTxLatencyMovTotal: prometheus.NewDesc(ns+"vap_transmit_latency_moving_total", "VAP Latency Moving Total Tramsit", labelV, nil),
VAPWifiTxLatencyMovCount: prometheus.NewDesc(ns+"vap_transmit_latency_moving_count", "VAP Latency Moving Count Tramsit", labelV, nil),
VAPCcq: nd(ns+"vap_ccq_ratio", "VAP Client Connection Quality", labelV, nil),
VAPMacFilterRejections: nd(ns+"vap_mac_filter_rejects_total", "VAP MAC Filter Rejections", labelV, nil),
VAPNumSatisfactionSta: nd(ns+"vap_satisfaction_stations", "VAP Number Satisfaction Stations", labelV, nil),
VAPAvgClientSignal: nd(ns+"vap_average_client_signal", "VAP Average Client Signal", labelV, nil),
VAPSatisfaction: nd(ns+"vap_satisfaction_ratio", "VAP Satisfaction", labelV, nil),
VAPSatisfactionNow: nd(ns+"vap_satisfaction_now_ratio", "VAP Satisfaction Now", labelV, nil),
VAPDNSAvgLatency: nd(ns+"vap_dns_latency_average_seconds", "VAP DNS Latency Average", labelV, nil),
VAPRxBytes: nd(ns+"vap_receive_bytes_total", "VAP Bytes Received", labelV, nil),
VAPRxCrypts: nd(ns+"vap_receive_crypts_total", "VAP Crypts Received", labelV, nil),
VAPRxDropped: nd(ns+"vap_receive_dropped_total", "VAP Dropped Received", labelV, nil),
VAPRxErrors: nd(ns+"vap_receive_errors_total", "VAP Errors Received", labelV, nil),
VAPRxFrags: nd(ns+"vap_receive_frags_total", "VAP Frags Received", labelV, nil),
VAPRxNwids: nd(ns+"vap_receive_nwids_total", "VAP Nwids Received", labelV, nil),
VAPRxPackets: nd(ns+"vap_receive_packets_total", "VAP Packets Received", labelV, nil),
VAPTxBytes: nd(ns+"vap_transmit_bytes_total", "VAP Bytes Transmitted", labelV, nil),
VAPTxDropped: nd(ns+"vap_transmit_dropped_total", "VAP Dropped Transmitted", labelV, nil),
VAPTxErrors: nd(ns+"vap_transmit_errors_total", "VAP Errors Transmitted", labelV, nil),
VAPTxPackets: nd(ns+"vap_transmit_packets_total", "VAP Packets Transmitted", labelV, nil),
VAPTxPower: nd(ns+"vap_transmit_power", "VAP Transmit Power", labelV, nil),
VAPTxRetries: nd(ns+"vap_transmit_retries_total", "VAP Retries Transmitted", labelV, nil),
VAPTxCombinedRetries: nd(ns+"vap_transmit_retries_combined_total", "VAP Retries Combined Tx", labelV, nil),
VAPTxDataMpduBytes: nd(ns+"vap_data_mpdu_transmit_bytes_total", "VAP Data MPDU Bytes Tx", labelV, nil),
VAPTxRtsRetries: nd(ns+"vap_transmit_rts_retries_total", "VAP RTS Retries Transmitted", labelV, nil),
VAPTxSuccess: nd(ns+"vap_transmit_success_total", "VAP Success Transmits", labelV, nil),
VAPTxTotal: nd(ns+"vap_transmit_total", "VAP Transmit Total", labelV, nil),
VAPTxGoodbytes: nd(ns+"vap_transmit_goodbyes", "VAP Goodbyes Transmitted", labelV, nil),
VAPTxLatAvg: nd(ns+"vap_transmit_latency_average_seconds", "VAP Latency Average Tx", labelV, nil),
VAPTxLatMax: nd(ns+"vap_transmit_latency_maximum_seconds", "VAP Latency Maximum Tx", labelV, nil),
VAPTxLatMin: nd(ns+"vap_transmit_latency_minimum_seconds", "VAP Latency Minimum Tx", labelV, nil),
VAPRxGoodbytes: nd(ns+"vap_receive_goodbyes", "VAP Goodbyes Received", labelV, nil),
VAPRxLatAvg: nd(ns+"vap_receive_latency_average_seconds", "VAP Latency Average Rx", labelV, nil),
VAPRxLatMax: nd(ns+"vap_receive_latency_maximum_seconds", "VAP Latency Maximum Rx", labelV, nil),
VAPRxLatMin: nd(ns+"vap_receive_latency_minimum_seconds", "VAP Latency Minimum Rx", labelV, nil),
VAPWifiTxLatencyMovAvg: nd(ns+"vap_transmit_latency_moving_avg_seconds", "VAP Latency Moving Avg Tx", labelV, nil),
VAPWifiTxLatencyMovMax: nd(ns+"vap_transmit_latency_moving_max_seconds", "VAP Latency Moving Max Tx", labelV, nil),
VAPWifiTxLatencyMovMin: nd(ns+"vap_transmit_latency_moving_min_seconds", "VAP Latency Moving Min Tx", labelV, nil),
VAPWifiTxLatencyMovTotal: nd(ns+"vap_transmit_latency_moving_total", "VAP Latency Moving Total Transmit", labelV, nil),
VAPWifiTxLatencyMovCount: nd(ns+"vap_transmit_latency_moving_count", "VAP Latency Moving Count Transmit", labelV, nil),
// N each - 1 per Radio. 1-4 radios per AP usually
RadioCurrentAntennaGain: prometheus.NewDesc(ns+"radio_current_antenna_gain", "Radio Current Antenna Gain", labelR, nil),
RadioHt: prometheus.NewDesc(ns+"radio_ht", "Radio HT", labelR, nil),
RadioMaxTxpower: prometheus.NewDesc(ns+"radio_max_transmit_power", "Radio Maximum Transmit Power", labelR, nil),
RadioMinTxpower: prometheus.NewDesc(ns+"radio_min_transmit_power", "Radio Minimum Transmit Power", labelR, nil),
RadioNss: prometheus.NewDesc(ns+"radio_nss", "Radio Nss", labelR, nil),
RadioRadioCaps: prometheus.NewDesc(ns+"radio_caps", "Radio Capabilities", labelR, nil),
RadioTxPower: prometheus.NewDesc(ns+"radio_transmit_power", "Radio Transmit Power", labelR, nil),
RadioAstBeXmit: prometheus.NewDesc(ns+"radio_ast_be_xmit", "Radio AstBe Transmit", labelR, nil),
RadioChannel: prometheus.NewDesc(ns+"radio_channel", "Radio Channel", labelR, nil),
RadioCuSelfRx: prometheus.NewDesc(ns+"radio_channel_utilization_receive_ratio", "Radio Channel Utilization Receive", labelR, nil),
RadioCuSelfTx: prometheus.NewDesc(ns+"radio_channel_utilization_transmit_ratio", "Radio Channel Utilization Transmit", labelR, nil),
RadioExtchannel: prometheus.NewDesc(ns+"radio_ext_channel", "Radio Ext Channel", labelR, nil),
RadioGain: prometheus.NewDesc(ns+"radio_gain", "Radio Gain", labelR, nil),
RadioNumSta: prometheus.NewDesc(ns+"radio_stations", "Radio Total Station Count", append(labelR, "station_type"), nil),
RadioTxPackets: prometheus.NewDesc(ns+"radio_transmit_packets", "Radio Transmitted Packets", labelR, nil),
RadioTxRetries: prometheus.NewDesc(ns+"radio_transmit_retries", "Radio Transmit Retries", labelR, nil),
RadioCurrentAntennaGain: nd(ns+"radio_current_antenna_gain", "Radio Current Antenna Gain", labelR, nil),
RadioHt: nd(ns+"radio_ht", "Radio HT", labelR, nil),
RadioMaxTxpower: nd(ns+"radio_max_transmit_power", "Radio Maximum Transmit Power", labelR, nil),
RadioMinTxpower: nd(ns+"radio_min_transmit_power", "Radio Minimum Transmit Power", labelR, nil),
RadioNss: nd(ns+"radio_nss", "Radio Nss", labelR, nil),
RadioRadioCaps: nd(ns+"radio_caps", "Radio Capabilities", labelR, nil),
RadioTxPower: nd(ns+"radio_transmit_power", "Radio Transmit Power", labelR, nil),
RadioAstBeXmit: nd(ns+"radio_ast_be_xmit", "Radio AstBe Transmit", labelR, nil),
RadioChannel: nd(ns+"radio_channel", "Radio Channel", labelR, nil),
RadioCuSelfRx: nd(ns+"radio_channel_utilization_receive_ratio", "Channel Utilization Rx", labelR, nil),
RadioCuSelfTx: nd(ns+"radio_channel_utilization_transmit_ratio", "Channel Utilization Tx", labelR, nil),
RadioExtchannel: nd(ns+"radio_ext_channel", "Radio Ext Channel", labelR, nil),
RadioGain: nd(ns+"radio_gain", "Radio Gain", labelR, nil),
RadioNumSta: nd(ns+"radio_stations", "Radio Total Station Count", append(labelR, "station_type"), nil),
RadioTxPackets: nd(ns+"radio_transmit_packets", "Radio Transmitted Packets", labelR, nil),
RadioTxRetries: nd(ns+"radio_transmit_retries", "Radio Transmit Retries", labelR, nil),
}
}
@ -162,7 +164,8 @@ func (u *promUnifi) exportUAP(r report, d *unifi.UAP) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
u.exportVAPtable(r, labels, d.VapTable)
@ -181,8 +184,9 @@ func (u *promUnifi) exportUAPstats(r report, labels []string, ap *unifi.Ap, byte
if ap == nil {
return
}
labelU := []string{"user", labels[1], labels[2]}
labelG := []string{"guest", labels[1], labels[2]}
labelU := []string{"user", labels[1], labels[2], labels[3]}
labelG := []string{"guest", labels[1], labels[2], labels[3]}
r.send([]*metric{
// ap only stuff.
{u.Device.BytesD, counter, bytes[0], labels}, // not sure if these 3 Ds are counters or gauges.
@ -229,8 +233,8 @@ func (u *promUnifi) exportVAPtable(r report, labels []string, vt unifi.VapTable)
if !v.Up.Val {
continue
}
labelV := []string{v.Name, v.Bssid, v.Radio, v.RadioName, v.Essid, v.Usage, labels[1], labels[2]}
labelV := []string{v.Name, v.Bssid, v.Radio, v.RadioName, v.Essid, v.Usage, labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.UAP.VAPCcq, gauge, float64(v.Ccq) / 1000.0, labelV},
{u.UAP.VAPMacFilterRejections, counter, v.MacFilterRejections, labelV},
@ -277,9 +281,10 @@ func (u *promUnifi) exportVAPtable(r report, labels []string, vt unifi.VapTable)
func (u *promUnifi) exportRADtable(r report, labels []string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
// radio table
for _, p := range rt {
labelR := []string{p.Name, p.Radio, labels[1], labels[2]}
labelR := []string{p.Name, p.Radio, labels[1], labels[2], labels[3]}
labelRUser := append(labelR, "user")
labelRGuest := append(labelR, "guest")
r.send([]*metric{
{u.UAP.RadioCurrentAntennaGain, gauge, p.CurrentAntennaGain, labelR},
{u.UAP.RadioHt, gauge, p.Ht, labelR},
@ -294,6 +299,7 @@ func (u *promUnifi) exportRADtable(r report, labels []string, rt unifi.RadioTabl
if t.Name != p.Name {
continue
}
r.send([]*metric{
{u.UAP.RadioTxPower, gauge, t.TxPower, labelR},
{u.UAP.RadioAstBeXmit, gauge, t.AstBeXmit, labelR},
@ -307,6 +313,7 @@ func (u *promUnifi) exportRADtable(r report, labels []string, rt unifi.RadioTabl
{u.UAP.RadioTxPackets, gauge, t.TxPackets, labelR},
{u.UAP.RadioTxRetries, gauge, t.TxRetries, labelR},
})
break
}
}

View File

@ -31,8 +31,9 @@ type unifiDevice struct {
}
func descDevice(ns string) *unifiDevice {
labels := []string{"type", "site_name", "name"}
labels := []string{"type", "site_name", "name", "source"}
infoLabels := []string{"version", "model", "serial", "mac", "ip", "id", "bytes", "uptime"}
return &unifiDevice{
Info: prometheus.NewDesc(ns+"info", "Device Information", append(labels, infoLabels...), nil),
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Device Uptime", labels, nil),
@ -63,7 +64,8 @@ func (u *promUnifi) exportUDM(r report, d *unifi.UDM) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
// Shared data (all devices do this).
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
@ -80,6 +82,7 @@ func (u *promUnifi) exportUDM(r report, d *unifi.UDM) {
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
// Wireless Data - UDM (non-pro) only
if d.Stat.Ap != nil && d.VapTable != nil {
u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
@ -103,6 +106,7 @@ func (u *promUnifi) exportSTAcount(r report, labels []string, stas ...unifi.Flex
{u.Device.Counter, gauge, stas[0], append(labels, "user")},
{u.Device.Counter, gauge, stas[1], append(labels, "guest")},
})
if len(stas) > 2 {
r.send([]*metric{
{u.Device.Counter, gauge, stas[2], append(labels, "desktop")},

View File

@ -36,7 +36,8 @@ type usg struct {
}
func descUSG(ns string) *usg {
labels := []string{"port", "site_name", "name"}
labels := []string{"port", "site_name", "name", "source"}
return &usg{
WanRxPackets: prometheus.NewDesc(ns+"wan_receive_packets_total", "WAN Receive Packets Total", labels, nil),
WanRxBytes: prometheus.NewDesc(ns+"wan_receive_bytes_total", "WAN Receive Bytes Total", labels, nil),
@ -72,8 +73,10 @@ func (u *promUnifi) exportUSG(r report, d *unifi.USG) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
// Gateway System Data.
u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
@ -91,8 +94,10 @@ func (u *promUnifi) exportUSGstats(r report, labels []string, gw *unifi.Gw, st u
if gw == nil {
return
}
labelLan := []string{"lan", labels[1], labels[2]}
labelWan := []string{"all", labels[1], labels[2]}
labelLan := []string{"lan", labels[1], labels[2], labels[3]}
labelWan := []string{"all", labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.USG.LanRxPackets, counter, gw.LanRxPackets, labelLan},
{u.USG.LanRxBytes, counter, gw.LanRxBytes, labelLan},
@ -115,7 +120,9 @@ func (u *promUnifi) exportWANPorts(r report, labels []string, wans ...unifi.Wan)
if !wan.Up.Val {
continue // only record UP interfaces.
}
labelWan := []string{wan.Name, labels[1], labels[2]}
labelWan := []string{wan.Name, labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.USG.WanRxPackets, counter, wan.RxPackets, labelWan},
{u.USG.WanRxBytes, counter, wan.RxBytes, labelWan},

View File

@ -47,46 +47,48 @@ type usw struct {
func descUSW(ns string) *usw {
pns := ns + "port_"
labelS := []string{"site_name", "name"}
labelP := []string{"port_id", "port_num", "port_name", "port_mac", "port_ip", "site_name", "name"}
labelS := []string{"site_name", "name", "source"}
labelP := []string{"port_id", "port_num", "port_name", "port_mac", "port_ip", "site_name", "name", "source"}
nd := prometheus.NewDesc
return &usw{
// This data may be derivable by sum()ing the port data.
SwRxPackets: prometheus.NewDesc(ns+"switch_receive_packets_total", "Switch Packets Received Total", labelS, nil),
SwRxBytes: prometheus.NewDesc(ns+"switch_receive_bytes_total", "Switch Bytes Received Total", labelS, nil),
SwRxErrors: prometheus.NewDesc(ns+"switch_receive_errors_total", "Switch Errors Received Total", labelS, nil),
SwRxDropped: prometheus.NewDesc(ns+"switch_receive_dropped_total", "Switch Dropped Received Total", labelS, nil),
SwRxCrypts: prometheus.NewDesc(ns+"switch_receive_crypts_total", "Switch Crypts Received Total", labelS, nil),
SwRxFrags: prometheus.NewDesc(ns+"switch_receive_frags_total", "Switch Frags Received Total", labelS, nil),
SwTxPackets: prometheus.NewDesc(ns+"switch_transmit_packets_total", "Switch Packets Transmit Total", labelS, nil),
SwTxBytes: prometheus.NewDesc(ns+"switch_transmit_bytes_total", "Switch Bytes Transmit Total", labelS, nil),
SwTxErrors: prometheus.NewDesc(ns+"switch_transmit_errors_total", "Switch Errors Transmit Total", labelS, nil),
SwTxDropped: prometheus.NewDesc(ns+"switch_transmit_dropped_total", "Switch Dropped Transmit Total", labelS, nil),
SwTxRetries: prometheus.NewDesc(ns+"switch_transmit_retries_total", "Switch Retries Transmit Total", labelS, nil),
SwRxMulticast: prometheus.NewDesc(ns+"switch_receive_multicast_total", "Switch Multicast Receive Total", labelS, nil),
SwRxBroadcast: prometheus.NewDesc(ns+"switch_receive_broadcast_total", "Switch Broadcast Receive Total", labelS, nil),
SwTxMulticast: prometheus.NewDesc(ns+"switch_transmit_multicast_total", "Switch Multicast Transmit Total", labelS, nil),
SwTxBroadcast: prometheus.NewDesc(ns+"switch_transmit_broadcast_total", "Switch Broadcast Transmit Total", labelS, nil),
SwBytes: prometheus.NewDesc(ns+"switch_bytes_total", "Switch Bytes Transferred Total", labelS, nil),
SwRxPackets: nd(ns+"switch_receive_packets_total", "Switch Packets Received Total", labelS, nil),
SwRxBytes: nd(ns+"switch_receive_bytes_total", "Switch Bytes Received Total", labelS, nil),
SwRxErrors: nd(ns+"switch_receive_errors_total", "Switch Errors Received Total", labelS, nil),
SwRxDropped: nd(ns+"switch_receive_dropped_total", "Switch Dropped Received Total", labelS, nil),
SwRxCrypts: nd(ns+"switch_receive_crypts_total", "Switch Crypts Received Total", labelS, nil),
SwRxFrags: nd(ns+"switch_receive_frags_total", "Switch Frags Received Total", labelS, nil),
SwTxPackets: nd(ns+"switch_transmit_packets_total", "Switch Packets Transmit Total", labelS, nil),
SwTxBytes: nd(ns+"switch_transmit_bytes_total", "Switch Bytes Transmit Total", labelS, nil),
SwTxErrors: nd(ns+"switch_transmit_errors_total", "Switch Errors Transmit Total", labelS, nil),
SwTxDropped: nd(ns+"switch_transmit_dropped_total", "Switch Dropped Transmit Total", labelS, nil),
SwTxRetries: nd(ns+"switch_transmit_retries_total", "Switch Retries Transmit Total", labelS, nil),
SwRxMulticast: nd(ns+"switch_receive_multicast_total", "Switch Multicast Receive Total", labelS, nil),
SwRxBroadcast: nd(ns+"switch_receive_broadcast_total", "Switch Broadcast Receive Total", labelS, nil),
SwTxMulticast: nd(ns+"switch_transmit_multicast_total", "Switch Multicast Transmit Total", labelS, nil),
SwTxBroadcast: nd(ns+"switch_transmit_broadcast_total", "Switch Broadcast Transmit Total", labelS, nil),
SwBytes: nd(ns+"switch_bytes_total", "Switch Bytes Transferred Total", labelS, nil),
// per-port data
PoeCurrent: prometheus.NewDesc(pns+"poe_amperes", "POE Current", labelP, nil),
PoePower: prometheus.NewDesc(pns+"poe_watts", "POE Power", labelP, nil),
PoeVoltage: prometheus.NewDesc(pns+"poe_volts", "POE Voltage", labelP, nil),
RxBroadcast: prometheus.NewDesc(pns+"receive_broadcast_total", "Receive Broadcast", labelP, nil),
RxBytes: prometheus.NewDesc(pns+"receive_bytes_total", "Total Receive Bytes", labelP, nil),
RxBytesR: prometheus.NewDesc(pns+"receive_rate_bytes", "Receive Bytes Rate", labelP, nil),
RxDropped: prometheus.NewDesc(pns+"receive_dropped_total", "Total Receive Dropped", labelP, nil),
RxErrors: prometheus.NewDesc(pns+"receive_errors_total", "Total Receive Errors", labelP, nil),
RxMulticast: prometheus.NewDesc(pns+"receive_multicast_total", "Total Receive Multicast", labelP, nil),
RxPackets: prometheus.NewDesc(pns+"receive_packets_total", "Total Receive Packets", labelP, nil),
Satisfaction: prometheus.NewDesc(pns+"satisfaction_ratio", "Satisfaction", labelP, nil),
Speed: prometheus.NewDesc(pns+"port_speed_bps", "Speed", labelP, nil),
TxBroadcast: prometheus.NewDesc(pns+"transmit_broadcast_total", "Total Transmit Broadcast", labelP, nil),
TxBytes: prometheus.NewDesc(pns+"transmit_bytes_total", "Total Transmit Bytes", labelP, nil),
TxBytesR: prometheus.NewDesc(pns+"transmit_rate_bytes", "Transmit Bytes Rate", labelP, nil),
TxDropped: prometheus.NewDesc(pns+"transmit_dropped_total", "Total Transmit Dropped", labelP, nil),
TxErrors: prometheus.NewDesc(pns+"transmit_errors_total", "Total Transmit Errors", labelP, nil),
TxMulticast: prometheus.NewDesc(pns+"transmit_multicast_total", "Total Tranmist Multicast", labelP, nil),
TxPackets: prometheus.NewDesc(pns+"transmit_packets_total", "Total Transmit Packets", labelP, nil),
PoeCurrent: nd(pns+"poe_amperes", "POE Current", labelP, nil),
PoePower: nd(pns+"poe_watts", "POE Power", labelP, nil),
PoeVoltage: nd(pns+"poe_volts", "POE Voltage", labelP, nil),
RxBroadcast: nd(pns+"receive_broadcast_total", "Receive Broadcast", labelP, nil),
RxBytes: nd(pns+"receive_bytes_total", "Total Receive Bytes", labelP, nil),
RxBytesR: nd(pns+"receive_rate_bytes", "Receive Bytes Rate", labelP, nil),
RxDropped: nd(pns+"receive_dropped_total", "Total Receive Dropped", labelP, nil),
RxErrors: nd(pns+"receive_errors_total", "Total Receive Errors", labelP, nil),
RxMulticast: nd(pns+"receive_multicast_total", "Total Receive Multicast", labelP, nil),
RxPackets: nd(pns+"receive_packets_total", "Total Receive Packets", labelP, nil),
Satisfaction: nd(pns+"satisfaction_ratio", "Satisfaction", labelP, nil),
Speed: nd(pns+"port_speed_bps", "Speed", labelP, nil),
TxBroadcast: nd(pns+"transmit_broadcast_total", "Total Transmit Broadcast", labelP, nil),
TxBytes: nd(pns+"transmit_bytes_total", "Total Transmit Bytes", labelP, nil),
TxBytesR: nd(pns+"transmit_rate_bytes", "Transmit Bytes Rate", labelP, nil),
TxDropped: nd(pns+"transmit_dropped_total", "Total Transmit Dropped", labelP, nil),
TxErrors: nd(pns+"transmit_errors_total", "Total Transmit Errors", labelP, nil),
TxMulticast: nd(pns+"transmit_multicast_total", "Total Transmit Multicast", labelP, nil),
TxPackets: nd(pns+"transmit_packets_total", "Total Transmit Packets", labelP, nil),
}
}
@ -94,8 +96,10 @@ func (u *promUnifi) exportUSW(r report, d *unifi.USW) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
u.exportUSWstats(r, labels, d.Stat.Sw)
u.exportPRTtable(r, labels, d.PortTable)
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
@ -105,13 +109,16 @@ func (u *promUnifi) exportUSW(r report, d *unifi.USW) {
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
// Switch System Data.
if d.HasTemperature.Val {
r.send([]*metric{{u.Device.Temperature, gauge, d.GeneralTemperature, labels}})
}
if d.HasFan.Val {
r.send([]*metric{{u.Device.FanLevel, gauge, d.FanLevel, labels}})
}
if d.TotalMaxPower.Txt != "" {
r.send([]*metric{{u.Device.TotalMaxPower, gauge, d.TotalMaxPower, labels}})
}
@ -122,7 +129,9 @@ func (u *promUnifi) exportUSWstats(r report, labels []string, sw *unifi.Sw) {
if sw == nil {
return
}
labelS := labels[1:]
r.send([]*metric{
{u.USW.SwRxPackets, counter, sw.RxPackets, labelS},
{u.USW.SwRxBytes, counter, sw.RxBytes, labelS},
@ -150,8 +159,11 @@ func (u *promUnifi) exportPRTtable(r report, labels []string, pt []unifi.Port) {
if !p.Up.Val || !p.Enable.Val {
continue
}
// Copy the shared labels, and add the port-specific ones.
labelP := []string{labels[2] + " Port " + p.PortIdx.Txt, p.PortIdx.Txt, p.Name, p.Mac, p.IP, labels[1], labels[2]}
labelP := []string{labels[2] + " Port " + p.PortIdx.Txt, p.PortIdx.Txt,
p.Name, p.Mac, p.IP, labels[1], labels[2], labels[3]}
if p.PoeEnable.Val && p.PortPoe.Val {
r.send([]*metric{
{u.USW.PoeCurrent, gauge, p.PoeCurrent, labelP},

26
plugins/mysql/README.md Normal file
View File

@ -0,0 +1,26 @@
# MYSQL Output Plugin Example
The code here, and the dynamic plugin it builds, show an example of how you can
write your own output for unifi-poller. This plugin records some very basic
data about clients on a UniFi network into a MySQL database.
You could write outputs that do almost anything. For example, an output could compare the
currently connected clients to a previous list (in a database, or stored in memory) and send a
notification when the list changes. The possibilities are endless.
You must compile your plugin against the unifi-poller source for the version you're
running. For example, to build a plugin for version 2.0.1, do this:
```
mkdir -p $GOPATH/src/github.com/davidnewhall
cd $GOPATH/src/github.com/davidnewhall
git clone git@github.com:davidnewhall/unifi-poller.git
cd unifi-poller
git checkout v2.0.1
make vendor
cp -r <your plugin> plugins/
GOOS=linux make plugins
```
The plugin you copy in *must* have a `main.go` file for `make plugins` to build it.

45
plugins/mysql/main.go Normal file
View File

@ -0,0 +1,45 @@
package main
import (
"fmt"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"golift.io/cnfg"
)
// mysqlConfig represents the data that is unmarshalled from the up.conf config file for this plugin.
type mysqlConfig struct {
Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
Host string `json:"host" toml:"host" xml:"host" yaml:"host"`
User string `json:"user" toml:"user" xml:"user" yaml:"user"`
Pass string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
DB string `json:"db" toml:"db" xml:"db" yaml:"db"`
Table string `json:"table" toml:"table" xml:"table" yaml:"table"`
// Maps do not work with ENV VARIABLES yet, but may in the future.
Fields []string `json:"fields" toml:"fields" xml:"field" yaml:"fields"`
}
// Pointers are ignored during ENV variable unmarshaling, so avoid pointers in your config.
// Only capitalized (exported) members are unmarshaled when passed into poller.NewOutput().
type plugin struct {
Config mysqlConfig `json:"mysql" toml:"mysql" xml:"mysql" yaml:"mysql"`
}
func init() {
u := &plugin{Config: mysqlConfig{}}
poller.NewOutput(&poller.Output{
Name: "mysql",
Config: u, // pass in the struct *above* your config (so it can see the struct tags).
Method: u.Run,
})
}
func main() {
fmt.Println("this is a unifi-poller plugin; not an application")
}
func (a *plugin) Run(c poller.Collect) error {
c.Logf("mysql plugin is not finished")
return nil
}
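The Run method above is only a stub. A finished plugin would poll through the poller.Collect interface it receives; below is a hedged sketch that assumes only the interface methods used elsewhere in this changeset (Metrics, Logf, LogErrorf). The interval and the MySQL write are placeholders, and "time" would need to be added to the imports:
```
// Sketch of a polling loop a finished plugin might run. Not part of this commit.
func (a *plugin) runLoop(c poller.Collect) error {
	ticker := time.NewTicker(30 * time.Second) // placeholder; a real plugin would use a.Config.Interval.
	defer ticker.Stop()

	for range ticker.C {
		metrics, _, err := c.Metrics()
		if err != nil {
			c.LogErrorf("fetching metrics: %v", err)
			continue
		}

		// A real plugin would INSERT these rows into MySQL here.
		c.Logf("mysql plugin: would store %d clients", len(metrics.Clients))
	}

	return nil
}
```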