davidnewhall2 2019-12-28 16:58:38 -08:00
parent 9b7e13e17a
commit cf83edf4b3
68 changed files with 11 additions and 5040 deletions


@ -1,30 +0,0 @@
/up.conf
/unifi-poller
/unifi-poller*.gz
/unifi-poller*.zip
/unifi-poller*.1
/unifi-poller*.deb
/unifi-poller*.rpm
/unifi-poller.*.arm
/unifi-poller.exe
/unifi-poller.*.macos
/unifi-poller.*.linux
/unifi-poller.rb
*.sha256
/vendor
.DS_Store
*~
/package_build_*
/release
MANUAL
MANUAL.html
README
README.html
/unifi-poller_manual.html
/homebrew_release_repo
/.metadata.make
bitly_token
github_deploy_key
gpg.signing.key
.secret-files.tar
*.so


@ -1,44 +0,0 @@
# Each line must have an export clause.
# This file is parsed and sourced by the Makefile, Docker and Homebrew builds.
# Powered by Application Builder: https://github.com/golift/application-builder
# Must match the repo name.
BINARY="unifi-poller"
# GitHub username
GHUSER="davidnewhall"
# GitHub repo that hosts the Homebrew formula.
HBREPO="golift/homebrew-mugs"
MAINT="David Newhall II <david at sleepers dot pro>"
VENDOR="Go Lift <code at golift dot io>"
DESC="Polls a UniFi controller, exports metrics to InfluxDB and Prometheus"
GOLANGCI_LINT_ARGS="--enable-all -D gochecknoglobals -D funlen -e G402 -D gochecknoinits"
# Example must exist at examples/$CONFIG_FILE.example
CONFIG_FILE="up.conf"
LICENSE="MIT"
# FORMULA is either 'service' or 'tool'. Services run as a daemon, tools do not.
# This affects the homebrew formula (launchd) and linux packages (systemd).
FORMULA="service"
export BINARY GHUSER HBREPO MAINT VENDOR DESC GOLANGCI_LINT_ARGS CONFIG_FILE LICENSE FORMULA
# The rest is mostly automatic.
# Fix the repo if it doesn't match the binary name.
# Provide a better URL if one exists.
# Used for source links and wiki links.
SOURCE_URL="https://github.com/${GHUSER}/${BINARY}"
# Used for documentation links.
URL="${SOURCE_URL}"
# Dynamic. Recommend not changing.
VVERSION=$(git describe --abbrev=0 --tags $(git rev-list --tags --max-count=1))
VERSION="$(echo $VVERSION | tr -d v | grep -E '^\S+$' || echo development)"
# This produces a 0 in some environments (like Homebrew), but it's only used for packages.
ITERATION=$(git rev-list --count --all || echo 0)
DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
COMMIT="$(git rev-parse --short HEAD || echo 0)"
# This is a custom download path for homebrew formula.
SOURCE_PATH=https://golift.io/${BINARY}/archive/v${VERSION}.tar.gz
export SOURCE_URL URL VVERSION VERSION ITERATION DATE COMMIT SOURCE_PATH


@ -1,84 +0,0 @@
# Powered by Application Builder: https://github.com/golift/application-builder
language: go
git:
depth: false
addons:
apt:
packages:
- ruby-dev
- rpm
- build-essential
- git
- libgnome-keyring-dev
- fakeroot
- zip
- debsigs
- gnupg
- expect
go:
- 1.13.x
services:
- docker
install:
- mkdir -p $GOPATH/bin
# Download the `dep` binary to bin folder in $GOPATH
- curl -sLo $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v0.5.3/dep-linux-amd64
- chmod +x $GOPATH/bin/dep
# Download the meta-linter: golangci-lint
- curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin latest
- rvm install 2.0.0
- rvm 2.0.0 do gem install --no-document fpm
before_script:
- gpg --import gpg.public.key
# Create your own deploy key, tar it, and encrypt the file to make this work. Optionally add a bitly_token file to the archive.
- openssl aes-256-cbc -K $encrypted_9f3147001275_key -iv $encrypted_9f3147001275_iv -in .secret-files.tar.enc -out .secret-files.tar -d
- tar -xf .secret-files.tar
- gpg --import gpg.signing.key
- rm -f gpg.signing.key .secret-files.tar
- source .metadata.sh
- make vendor
script:
# Test Go and Docker.
- make test
- make docker
# Test built docker image.
- docker run $BINARY -v 2>&1 | grep -Eq "^$BINARY v$VERSION"
# Build everything
- rvm 2.0.0 do make release
after_success:
# Display Release Folder
- ls -l release/
# Setup the ssh client so we can clone and push to the homebrew formula repo.
# You must put github_deploy_key into .secret-files.tar.enc
# This is an SSH deploy key added to your Homebrew formula repo.
- |
mkdir -p $HOME/.ssh
declare -r SSH_FILE="$(mktemp -u $HOME/.ssh/XXXXX)"
echo -e "Host github.com\n\tStrictHostKeyChecking no\n" >> $HOME/.ssh/config
[ ! -f github_deploy_key ] || (mv github_deploy_key $SSH_FILE \
&& chmod 600 "$SSH_FILE" \
&& printf "%s\n" \
"Host github.com" \
" IdentityFile $SSH_FILE" \
" StrictHostKeyChecking no" \
" LogLevel ERROR" >> $HOME/.ssh/config)
deploy:
- provider: releases
api_key:
secure: GsvW0m+EnRELQMk8DjH63VXinqbwse4FJ4vNUslOE6CZ8PBXPrH0ZgaI7ic/uxRtm7CYj0sir4CZq62W5l6uhoXCCQfjOnmJspqnQcrFZ1xRdWktsNXaRwM6hlzaUThsJ/1PD9Psc66uKXBYTg0IlUz0yjZAZk7tCUE4libuj41z40ZKxUcbfcNvH4Njc9IpNB4QSA3ss+a9/6ZwBz4tHVamsGIrzaE0Zf99ItNBYvaOwhM2rC/NWIsFmwt8w4rIA2NIrkZgMDV+Z2Niqh4JRLAWCQNx/RjC5U52lG2yhqivUC3TromZ+q4O4alUltsyIzF2nVanLWgJmbeFo8uXT5A+gd3ovSkFLU9medXd9i4kap7kN/o5m9p5QZvrdEYHEmIU4ml5rjT2EQQVy5CtSmpiRAbhpEJIvA1wDtRq8rdz8IVfJXkHNjg2XdouNmMMWqa3OkEPw21+uxsqv4LscW/6ZjsavzL5SSdnBRU9n79EfGJE/tJLKiNumah/vLuJ5buNhgqmCdtX/Tg+DhQS1BOyYg4l4L8s9IIKZgFRwrOPsZnA/KsrWg4ZsjJ87cqKCaT/qs2EJx5odZcZWJYLBngeO8Tc6cQtLgJdieY2oEKo51Agq4rgikZDt21m6TY9/R5lPN0piwdpy3ZGKfv1ijXx74raMT03qskputzMCvc=
overwrite: true
skip_cleanup: true
file_glob: true
file: release/*
on:
tags: true
- provider: script
script: scripts/formula-deploy.sh
on:
tags: true
- provider: script
script: scripts/package-deploy.sh
skip_cleanup: true
on:
all_branches: true
condition: $TRAVIS_BRANCH =~ ^(master|v[0-9.]+)$


@ -1,73 +0,0 @@
_This doc is far from complete._
# Build Pipeline
Let's talk about how the software gets built for our users before we talk about
making changes to it.
## TravisCI
This repo is tested, built and deployed by [Travis-CI](https://travis-ci.org/davidnewhall/unifi-poller).
The [.travis.yml](.travis.yml) file in this repo coordinates the entire process.
As long as this document is kept up to date, this is what the Travis file does:
- Creates a Go-capable build environment on a Linux host, some Debian variant.
- Installs ruby-dev to get RubyGems.
- Installs other build tools, including rpm and fpm from RubyGems.
- Starts Docker, builds the Docker image, and runs it.
- Tests that the Docker container ran and produced the expected output.
- Makes a release with `make release`. This does a lot of things, controlled by the [Makefile](Makefile); a sketch of the equivalent local commands follows this list.
- Runs go tests and go linters.
- Compiles the application binaries for Windows, Linux and macOS.
- Compiles a man page that goes into the packages.
- Creates rpm and deb packages using fpm.
- Puts the packages, gzipped binaries and files containing the SHA256s of each asset into a release folder.
After the release is built and the Docker image is tested:
- Deploys the release assets to the tagged release on GitHub using an encrypted GitHub token (API key).
- Runs [another script](scripts/formula-deploy.sh) to create and upload a Homebrew formula to [golift/homebrew-mugs](https://github.com/golift/homebrew-mugs).
- Uses an encrypted SSH key to upload the updated formula to the repo.
- Travis does nothing else with Docker; it just makes sure the thing compiles and runs.
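For reference, here is a rough sketch of the equivalent local workflow, assuming Go, Docker, and fpm are installed (Travis runs fpm under rvm; the exact targets are defined in the [Makefile](Makefile)):

```shell
# Roughly what Travis runs, invoked by hand from the repo root.
make vendor     # vendor dependencies (go mod vendor)
make test       # golangci-lint followed by go test -race
make docker     # build the Docker image from init/docker/Dockerfile
make release    # cross-compile binaries and build rpm/deb packages into release/
ls -l release/  # inspect the produced assets and their checksums file
```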
### Homebrew
It's a Mac thing.
[Homebrew](https://brew.sh) is all I use at home. Please don't break the homebrew
formula stuff; it took a lot of pain to get it just right. I am very interested
in how it works for you.
### Docker
Docker is built automatically by Docker Cloud using the Dockerfile in the path
[init/docker/Dockerfile](init/docker/Dockerfile). Some of the configuration is
done in the Cloud service under my personal account `golift`, but the majority
happens in the build files in the [init/docker/hooks/](init/docker/hooks/) directory.
If you need to change the Dockerfile, please clearly explain what problem your
changes solve and how they have been tested and validated. As far as I'm
concerned this file should never need to change again, but I'm not a Docker expert;
you're welcome to prove me wrong.
# Contributing
Make a pull request and tell me what you're fixing. Pretty simple. If I need to
I'll add more "rules." For now I'm happy to have help. Thank you!
## Wiki
**If you see typos, errors, omissions, etc., please fix them.**
At this point, the wiki is pretty solid. Please keep your edits brief and without
too much opinion. If you want to provide a way to do something, please also provide
any alternatives you're aware of. If you're not sure, just open an issue and we can
hash it out. I'm reasonable.
## UniFi Library
If you're trying to fix something in the UniFi data collection (i.e., you got an
unmarshal error, or you want to add something I didn't include), then you
should look at the [UniFi library](https://github.com/golift/unifi). All the
data collection and export code lives there. Contributions and Issues are welcome
on that code base as well.


@ -1,22 +0,0 @@
MIT LICENSE.
Copyright (c) 2016 Garrett Bjerkhoel
Copyright (c) 2018-2019 David Newhall II
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,311 +0,0 @@
# This Makefile is written as generic as possible.
# Setting the variables in .metadata.sh and creating the paths in the repo makes this work.
# See more: https://github.com/golift/application-builder
# Suck in our application information.
IGNORED:=$(shell bash -c "source .metadata.sh ; env | sed 's/=/:=/;s/^/export /' > .metadata.make")
# md2roff turns markdown into man files and html files.
MD2ROFF_BIN=github.com/github/hub/md2roff-bin
# Travis CI passes the version in. Local builds get it from the current git tag.
ifeq ($(VERSION),)
include .metadata.make
else
# Preserve the passed-in version & iteration (homebrew).
_VERSION:=$(VERSION)
_ITERATION:=$(ITERATION)
include .metadata.make
VERSION:=$(_VERSION)
ITERATION:=$(_ITERATION)
endif
# rpm is weird and changes - to _ in versions.
RPMVERSION:=$(shell echo $(VERSION) | tr -- - _)
PACKAGE_SCRIPTS=
ifeq ($(FORMULA),service)
PACKAGE_SCRIPTS=--after-install scripts/after-install.sh --before-remove scripts/before-remove.sh
endif
define PACKAGE_ARGS
$(PACKAGE_SCRIPTS) \
--name $(BINARY) \
--deb-no-default-config-files \
--rpm-os linux \
--iteration $(ITERATION) \
--license $(LICENSE) \
--url $(URL) \
--maintainer "$(MAINT)" \
--vendor "$(VENDOR)" \
--description "$(DESC)" \
--config-files "/etc/$(BINARY)/$(CONFIG_FILE)"
endef
PLUGINS:=$(patsubst plugins/%/main.go,%,$(wildcard plugins/*/main.go))
VERSION_LDFLAGS:= \
-X github.com/prometheus/common/version.Branch=$(TRAVIS_BRANCH) \
-X github.com/prometheus/common/version.BuildDate=$(DATE) \
-X github.com/prometheus/common/version.Revision=$(COMMIT) \
-X github.com/prometheus/common/version.Version=$(VERSION)-$(ITERATION)
# Makefile targets follow.
all: build
# Prepare a release. Called in Travis CI.
release: clean macos windows linux_packages
# Preparing a release!
mkdir -p $@
mv $(BINARY).*.macos $(BINARY).*.linux $@/
gzip -9r $@/
for i in $(BINARY)*.exe; do zip -9qm $@/$$i.zip $$i;done
mv *.rpm *.deb $@/
# Generating File Hashes
openssl dgst -r -sha256 $@/* | sed 's#release/##' | tee $@/checksums.sha256.txt
# Delete all build assets.
clean:
# Cleaning up.
rm -f $(BINARY) $(BINARY).*.{macos,linux,exe}{,.gz,.zip} $(BINARY).1{,.gz} $(BINARY).rb
rm -f $(BINARY){_,-}*.{deb,rpm} v*.tar.gz.sha256 examples/MANUAL .metadata.make
rm -f cmd/$(BINARY)/README{,.html} README{,.html} ./$(BINARY)_manual.html
rm -rf package_build_* release
# Build a man page from a markdown file using md2roff.
# This also turns the repo readme into an html file.
# md2roff is needed to build the man file and html pages from the READMEs.
man: $(BINARY).1.gz
$(BINARY).1.gz: md2roff
# Building man page. Build dependency first: md2roff
go run $(MD2ROFF_BIN) --manual $(BINARY) --version $(VERSION) --date "$(DATE)" examples/MANUAL.md
gzip -9nc examples/MANUAL > $@
mv examples/MANUAL.html $(BINARY)_manual.html
md2roff:
go get $(MD2ROFF_BIN)
# TODO: provide a template that adds the date to the built html file.
readme: README.html
README.html: md2roff
# This turns README.md into README.html
go run $(MD2ROFF_BIN) --manual $(BINARY) --version $(VERSION) --date "$(DATE)" README.md
# Binaries
build: $(BINARY)
$(BINARY): main.go pkg/*/*.go
go build -o $(BINARY) -ldflags "-w -s $(VERSION_LDFLAGS)"
linux: $(BINARY).amd64.linux
$(BINARY).amd64.linux: main.go pkg/*/*.go
# Building linux 64-bit x86 binary.
GOOS=linux GOARCH=amd64 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
linux386: $(BINARY).i386.linux
$(BINARY).i386.linux: main.go pkg/*/*.go
# Building linux 32-bit x86 binary.
GOOS=linux GOARCH=386 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
arm: arm64 armhf
arm64: $(BINARY).arm64.linux
$(BINARY).arm64.linux: main.go pkg/*/*.go
# Building linux 64-bit ARM binary.
GOOS=linux GOARCH=arm64 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
armhf: $(BINARY).armhf.linux
$(BINARY).armhf.linux: main.go pkg/*/*.go
# Building linux 32-bit ARM binary.
GOOS=linux GOARCH=arm GOARM=6 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
macos: $(BINARY).amd64.macos
$(BINARY).amd64.macos: main.go pkg/*/*.go
# Building darwin 64-bit x86 binary.
GOOS=darwin GOARCH=amd64 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
exe: $(BINARY).amd64.exe
windows: $(BINARY).amd64.exe
$(BINARY).amd64.exe: main.go pkg/*/*.go
# Building windows 64-bit x86 binary.
GOOS=windows GOARCH=amd64 go build -o $@ -ldflags "-w -s $(VERSION_LDFLAGS)"
# Packages
linux_packages: rpm deb rpm386 deb386 debarm rpmarm debarmhf rpmarmhf
rpm: $(BINARY)-$(RPMVERSION)-$(ITERATION).x86_64.rpm
$(BINARY)-$(RPMVERSION)-$(ITERATION).x86_64.rpm: package_build_linux check_fpm
@echo "Building 'rpm' package for $(BINARY) version '$(RPMVERSION)-$(ITERATION)'."
fpm -s dir -t rpm $(PACKAGE_ARGS) -a x86_64 -v $(RPMVERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn rpmsign --key-id=$(SIGNING_KEY) --resign $(BINARY)-$(RPMVERSION)-$(ITERATION).x86_64.rpm; expect -exact \"Enter pass phrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
deb: $(BINARY)_$(VERSION)-$(ITERATION)_amd64.deb
$(BINARY)_$(VERSION)-$(ITERATION)_amd64.deb: package_build_linux check_fpm
@echo "Building 'deb' package for $(BINARY) version '$(VERSION)-$(ITERATION)'."
fpm -s dir -t deb $(PACKAGE_ARGS) -a amd64 -v $(VERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn debsigs --default-key="$(SIGNING_KEY)" --sign=origin $(BINARY)_$(VERSION)-$(ITERATION)_amd64.deb; expect -exact \"Enter passphrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
rpm386: $(BINARY)-$(RPMVERSION)-$(ITERATION).i386.rpm
$(BINARY)-$(RPMVERSION)-$(ITERATION).i386.rpm: package_build_linux_386 check_fpm
@echo "Building 32-bit 'rpm' package for $(BINARY) version '$(RPMVERSION)-$(ITERATION)'."
fpm -s dir -t rpm $(PACKAGE_ARGS) -a i386 -v $(RPMVERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn rpmsign --key-id=$(SIGNING_KEY) --resign $(BINARY)-$(RPMVERSION)-$(ITERATION).i386.rpm; expect -exact \"Enter pass phrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
deb386: $(BINARY)_$(VERSION)-$(ITERATION)_i386.deb
$(BINARY)_$(VERSION)-$(ITERATION)_i386.deb: package_build_linux_386 check_fpm
@echo "Building 32-bit 'deb' package for $(BINARY) version '$(VERSION)-$(ITERATION)'."
fpm -s dir -t deb $(PACKAGE_ARGS) -a i386 -v $(VERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn debsigs --default-key="$(SIGNING_KEY)" --sign=origin $(BINARY)_$(VERSION)-$(ITERATION)_i386.deb; expect -exact \"Enter passphrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
rpmarm: $(BINARY)-$(RPMVERSION)-$(ITERATION).arm64.rpm
$(BINARY)-$(RPMVERSION)-$(ITERATION).arm64.rpm: package_build_linux_arm64 check_fpm
@echo "Building 64-bit ARM8 'rpm' package for $(BINARY) version '$(RPMVERSION)-$(ITERATION)'."
fpm -s dir -t rpm $(PACKAGE_ARGS) -a arm64 -v $(RPMVERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn rpmsign --key-id=$(SIGNING_KEY) --resign $(BINARY)-$(RPMVERSION)-$(ITERATION).arm64.rpm; expect -exact \"Enter pass phrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
debarm: $(BINARY)_$(VERSION)-$(ITERATION)_arm64.deb
$(BINARY)_$(VERSION)-$(ITERATION)_arm64.deb: package_build_linux_arm64 check_fpm
@echo "Building 64-bit ARM8 'deb' package for $(BINARY) version '$(VERSION)-$(ITERATION)'."
fpm -s dir -t deb $(PACKAGE_ARGS) -a arm64 -v $(VERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn debsigs --default-key="$(SIGNING_KEY)" --sign=origin $(BINARY)_$(VERSION)-$(ITERATION)_arm64.deb; expect -exact \"Enter passphrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
rpmarmhf: $(BINARY)-$(RPMVERSION)-$(ITERATION).armhf.rpm
$(BINARY)-$(RPMVERSION)-$(ITERATION).armhf.rpm: package_build_linux_armhf check_fpm
@echo "Building 32-bit ARM6/7 HF 'rpm' package for $(BINARY) version '$(RPMVERSION)-$(ITERATION)'."
fpm -s dir -t rpm $(PACKAGE_ARGS) -a armhf -v $(RPMVERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn rpmsign --key-id=$(SIGNING_KEY) --resign $(BINARY)-$(RPMVERSION)-$(ITERATION).armhf.rpm; expect -exact \"Enter pass phrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
debarmhf: $(BINARY)_$(VERSION)-$(ITERATION)_armhf.deb
$(BINARY)_$(VERSION)-$(ITERATION)_armhf.deb: package_build_linux_armhf check_fpm
@echo "Building 32-bit ARM6/7 HF 'deb' package for $(BINARY) version '$(VERSION)-$(ITERATION)'."
fpm -s dir -t deb $(PACKAGE_ARGS) -a armhf -v $(VERSION) -C $<
[ "$(SIGNING_KEY)" == "" ] || expect -c "spawn debsigs --default-key="$(SIGNING_KEY)" --sign=origin $(BINARY)_$(VERSION)-$(ITERATION)_armhf.deb; expect -exact \"Enter passphrase: \"; send \"$(PRIVATE_KEY)\r\"; expect eof"
# Build an environment that can be packaged for linux.
package_build_linux: readme man plugins_linux_amd64 linux
# Building package environment for linux.
mkdir -p $@/usr/bin $@/etc/$(BINARY) $@/usr/share/man/man1 $@/usr/share/doc/$(BINARY) $@/usr/lib/$(BINARY)
# Copying the binary, config file, unit file, and man page into the env.
cp $(BINARY).amd64.linux $@/usr/bin/$(BINARY)
cp *.1.gz $@/usr/share/man/man1
rm -f $@/usr/lib/$(BINARY)/*.so
cp *amd64.so $@/usr/lib/$(BINARY)/
cp examples/$(CONFIG_FILE).example $@/etc/$(BINARY)/
cp examples/$(CONFIG_FILE).example $@/etc/$(BINARY)/$(CONFIG_FILE)
cp LICENSE *.html examples/*?.?* $@/usr/share/doc/$(BINARY)/
[ "$(FORMULA)" != "service" ] || mkdir -p $@/lib/systemd/system
[ "$(FORMULA)" != "service" ] || \
sed -e "s/{{BINARY}}/$(BINARY)/g" -e "s/{{DESC}}/$(DESC)/g" \
init/systemd/template.unit.service > $@/lib/systemd/system/$(BINARY).service
package_build_linux_386: package_build_linux linux386
mkdir -p $@
cp -r $</* $@/
cp $(BINARY).i386.linux $@/usr/bin/$(BINARY)
package_build_linux_arm64: package_build_linux arm64
mkdir -p $@
cp -r $</* $@/
cp $(BINARY).arm64.linux $@/usr/bin/$(BINARY)
package_build_linux_armhf: package_build_linux armhf
mkdir -p $@
cp -r $</* $@/
cp $(BINARY).armhf.linux $@/usr/bin/$(BINARY)
check_fpm:
@fpm --version > /dev/null || (echo "FPM missing. Install FPM: https://fpm.readthedocs.io/en/latest/installing.html" && false)
docker:
docker build -f init/docker/Dockerfile \
--build-arg "BUILD_DATE=$(DATE)" \
--build-arg "COMMIT=$(COMMIT)" \
--build-arg "VERSION=$(VERSION)-$(ITERATION)" \
--build-arg "LICENSE=$(LICENSE)" \
--build-arg "DESC=$(DESC)" \
--build-arg "URL=$(URL)" \
--build-arg "VENDOR=$(VENDOR)" \
--build-arg "AUTHOR=$(MAINT)" \
--build-arg "BINARY=$(BINARY)" \
--build-arg "SOURCE_URL=$(SOURCE_URL)" \
--build-arg "CONFIG_FILE=$(CONFIG_FILE)" \
--tag $(BINARY) .
# This builds a Homebrew formula file that can be used to install this app from source.
# The source used comes from the released version on GitHub. This will not work with local source.
# This target is used by Travis CI to update the released Formula when a new tag is created.
formula: $(BINARY).rb
v$(VERSION).tar.gz.sha256:
# Calculate the SHA from the GitHub source file.
curl -sL $(URL)/archive/v$(VERSION).tar.gz | openssl dgst -r -sha256 | tee $@
$(BINARY).rb: v$(VERSION).tar.gz.sha256 init/homebrew/$(FORMULA).rb.tmpl
# Creating formula from template using sed.
sed -e "s/{{Version}}/$(VERSION)/g" \
-e "s/{{Iter}}/$(ITERATION)/g" \
-e "s/{{SHA256}}/$(shell head -c64 $<)/g" \
-e "s/{{Desc}}/$(DESC)/g" \
-e "s%{{URL}}%$(URL)%g" \
-e "s%{{SOURCE_PATH}}%$(SOURCE_PATH)%g" \
-e "s%{{SOURCE_URL}}%$(SOURCE_URL)%g" \
-e "s%{{CONFIG_FILE}}%$(CONFIG_FILE)%g" \
-e "s%{{Class}}%$(shell echo $(BINARY) | perl -pe 's/(?:\b|-)(\p{Ll})/\u$$1/g')%g" \
init/homebrew/$(FORMULA).rb.tmpl | tee $(BINARY).rb
# That perl line turns hello-world into HelloWorld, etc.
plugins: $(patsubst %,%.so,$(PLUGINS))
$(patsubst %,%.so,$(PLUGINS)):
go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.so,%,$@)
linux_plugins: plugins_linux_amd64 plugins_linux_i386 plugins_linux_arm64 plugins_linux_armhf
plugins_linux_amd64: $(patsubst %,%.linux_amd64.so,$(PLUGINS))
$(patsubst %,%.linux_amd64.so,$(PLUGINS)):
GOOS=linux GOARCH=amd64 go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.linux_amd64.so,%,$@)
plugins_darwin: $(patsubst %,%.darwin.so,$(PLUGINS))
$(patsubst %,%.darwin.so,$(PLUGINS)):
GOOS=darwin go build -o $@ -ldflags "$(VERSION_LDFLAGS)" -buildmode=plugin ./plugins/$(patsubst %.darwin.so,%,$@)
# Extras
# Run code tests and lint.
test: lint
# Testing.
go test -race -covermode=atomic ./...
lint:
# Checking lint.
golangci-lint run $(GOLANGCI_LINT_ARGS)
# This is safe; recommended even.
dep: vendor
vendor: go.mod go.sum
go mod vendor
# Don't run this unless you're ready to debug untested vendored dependencies.
deps: update vendor
update:
go get -u -d
# Homebrew stuff. macOS only.
# Used for Homebrew only. Other distros can create packages.
install: man readme $(BINARY) plugins_darwin
@echo - Done Building! -
@echo - Local installation with the Makefile is only supported on macOS.
@echo If you wish to install the application manually on Linux, check out the wiki: https://$(SOURCE_URL)/wiki/Installation
@echo - Otherwise, build and install a package: make rpm -or- make deb
@echo See the Package Install wiki for more info: https://$(SOURCE_URL)/wiki/Package-Install
@[ "$(shell uname)" = "Darwin" ] || (echo "Unable to continue, not a Mac." && false)
@[ "$(PREFIX)" != "" ] || (echo "Unable to continue, PREFIX not set. Use: make install PREFIX=/usr/local ETC=/usr/local/etc" && false)
@[ "$(ETC)" != "" ] || (echo "Unable to continue, ETC not set. Use: make install PREFIX=/usr/local ETC=/usr/local/etc" && false)
# Copying the binary, plugins, man page, and config file into place.
/usr/bin/install -m 0755 -d $(PREFIX)/bin $(PREFIX)/share/man/man1 $(ETC)/$(BINARY) $(PREFIX)/share/doc/$(BINARY) $(PREFIX)/lib/$(BINARY)
/usr/bin/install -m 0755 -cp $(BINARY) $(PREFIX)/bin/$(BINARY)
/usr/bin/install -m 0755 -cp *darwin.so $(PREFIX)/lib/$(BINARY)/
/usr/bin/install -m 0644 -cp $(BINARY).1.gz $(PREFIX)/share/man/man1
/usr/bin/install -m 0644 -cp examples/$(CONFIG_FILE).example $(ETC)/$(BINARY)/
[ -f $(ETC)/$(BINARY)/$(CONFIG_FILE) ] || /usr/bin/install -m 0644 -cp examples/$(CONFIG_FILE).example $(ETC)/$(BINARY)/$(CONFIG_FILE)
/usr/bin/install -m 0644 -cp LICENSE *.html examples/* $(PREFIX)/share/doc/$(BINARY)/


@ -1,133 +0,0 @@
<img width="320px" src="https://raw.githubusercontent.com/wiki/davidnewhall/unifi-poller/images/unifi-poller-logo.png">
[![discord](https://badgen.net/badge/icon/Discord?color=0011ff&label&icon=https://simpleicons.now.sh/discord/eee "Ubiquiti Discord")](https://discord.gg/KnyKYt2)
[![twitter](https://badgen.net/twitter/follow/TwitchCaptain?icon=https://simpleicons.now.sh/twitter/0099ff&label=TwitchCaptain&color=0116ff "TwitchCaptain @ Twitter")](https://twitter.com/TwitchCaptain)
[![grafana](https://badgen.net/https/golift.io/bd/grafana/dashboard-downloads/10414,10415,10416,10417,10418,11311,11312,11313,11314,11315?icon=https://simpleicons.now.sh/grafana/ED7F38&color=0011ff "Grafana Dashboard Downloads")](http://grafana.com/dashboards?search=unifi-poller)
[![pulls](https://badgen.net/docker/pulls/golift/unifi-poller?icon=https://simpleicons.now.sh/docker/38B1ED&label=pulls&color=0011ff "Docker Pulls")](https://hub.docker.com/r/golift/unifi-poller)
[![DLs](https://img.shields.io/github/downloads/davidnewhall/unifi-poller/total.svg?logo=github&color=0116ff "GitHub Downloads")](https://www.somsubhra.com/github-release-stats/?username=davidnewhall&repository=unifi-poller)
[![unifi](https://badgen.net/badge/UniFi/5.11.x,5.12.x,UAP,USG,USW,UDM?list=|&icon=https://docs.golift.io/svg/ubiquiti_color.svg&color=0099ee "UniFi Products Supported")](https://github.com/golift/unifi)
[![builder](https://badgen.net/badge/go/Application%20Builder?label=&icon=https://docs.golift.io/svg/go.svg&color=0099ee "Go Application Builder")](https://github.com/golift/application-builder)
[![stars](https://badgen.net/github/stars/davidnewhall/unifi-poller?icon=https://simpleicons.now.sh/macys/fab&label=&color=0099ee "GitHub Stars")](https://github.com/davidnewhall/unifi-poller)
[![travis](https://badgen.net/travis/davidnewhall/unifi-poller?icon=travis&label=build "Travis Build")](https://travis-ci.org/davidnewhall/unifi-poller)
Collect your UniFi controller data and report it to an InfluxDB instance,
or export it for Prometheus collection. Prometheus support is
[new](https://github.com/davidnewhall/unifi-poller/issues/88), and much
of the documentation still needs to be updated (as of 12/2/2019).
[Ten Grafana Dashboards](http://grafana.com/dashboards?search=unifi-poller)
are included, with screenshots: five for InfluxDB and five for Prometheus.
## Installation
[See the Wiki!](https://github.com/davidnewhall/unifi-poller/wiki/Installation)
We have a special place for [Docker Users](https://github.com/davidnewhall/unifi-poller/wiki/Docker).
I'm willing to help if you run into trouble.
Open an [Issue](https://github.com/davidnewhall/unifi-poller/issues) and
we'll figure out how to get things working for you. You can also get help in
the #unifi-poller channel on the [Ubiquiti Discord server](https://discord.gg/KnyKYt2).
I've also [provided a forum post](https://community.ui.com/questions/Unifi-Poller-Store-Unifi-Controller-Metrics-in-InfluxDB-without-SNMP/58a0ea34-d2b3-41cd-93bb-d95d3896d1a1) you may use to get additional help.
## Description
[Ubiquiti](https://www.ui.com) makes networking devices like switches, gateways
(routers) and wireless access points. They have a line of equipment named
[UniFi](https://www.ui.com/products/#unifi) that uses a
[controller](https://www.ui.com/download/unifi/) to keep stats and simplify network
device configuration. This controller can be installed on Windows, macOS and Linux.
Ubiquiti also provides a dedicated hardware device called a
[CloudKey](https://www.ui.com/unifi/unifi-cloud-key/) that runs the controller software. More recently they've developed the Dream Machine; it's still in
beta / early access, but UniFi Poller can collect its data!
UniFi Poller is a small Golang application that runs on Windows, macOS, Linux or
Docker. In InfluxDB mode, it polls a UniFi controller every 30 seconds for
measurements and exports the data to an Influx database. In Prometheus mode, the
poller opens a web port and accepts Prometheus polling. It converts the UniFi
Controller API data into Prometheus exports on the fly.
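If you are running in Prometheus mode, a quick way to confirm the exporter is up is to request the metrics endpoint directly. This is only a sketch; it assumes the default `http_listen` address of `0.0.0.0:9130` from the example config file:

```shell
# Assumes unifi-poller is running with the example config's http_listen ("0.0.0.0:9130").
curl -s http://127.0.0.1:9130/metrics | head
```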
This application requires your controller to be running all the time. If you run
a UniFi controller, there's no excuse not to install
[Influx](https://github.com/davidnewhall/unifi-poller/wiki/InfluxDB) or
[Prometheus](https://prometheus.io),
[Grafana](https://github.com/davidnewhall/unifi-poller/wiki/Grafana) and this app.
You'll have a plethora of data at your fingertips and the ability to craft custom
graphs to slice the data any way you choose. Good luck!
## Backstory
I found a simple piece of code on GitHub that sorta did what I needed;
we all know that story. I wanted more data, so I added more data collection.
I believe I've completely rewritten every piece of original code, except the
copyright/license file and that's fine with me. I probably wouldn't have made
it this far if [Garrett](https://github.com/dewski/unifi) hadn't written the
original code I started with. Many props my man.
The original code pulled only the client data. This app now pulls data
for clients, access points, security gateways, dream machines and switches. I
used to own two UAP-AC-PROs, one USG-3 and one US-24-250W, but have since upgraded
a few devices. Many other users have also provided feedback to improve this app,
and we have reports of it working on nearly every switch, AP and gateway.
## What's this data good for?
I've been trying to get my UAP data into Grafana. Sure, google search that.
You'll find [this](https://community.ubnt.com/t5/UniFi-Wireless/Grafana-dashboard-for-UniFi-APs-now-available/td-p/1833532). What if you don't want to deal with SNMP?
Well, here you go. I've replicated 400% of what you see on those SNMP-powered
dashboards with this Go app running on the same Mac as my UniFi controller.
All without enabling SNMP or trying to understand those OIDs. Mad props
to [waterside](https://community.ubnt.com/t5/user/viewprofilepage/user-id/303058)
for making this dashboard; it gave me a fantastic start to making my own dashboards.
## Operation
You can control this app with Puppet, Chef, SaltStack, Homebrew, or a simple bash
script if you need to. Packages are available for macOS, Linux and Docker.
It comes with a systemd service unit that lets you start it automatically on most Linux hosts.
It works just fine on [Windows](https://github.com/davidnewhall/unifi-poller/wiki/Windows) too.
Most people prefer Docker, and this app is right at home in that environment.
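For example, after installing the deb or rpm package on Linux, the bundled unit can be managed like any other service. A minimal sketch, assuming the package installed the unit file as `unifi-poller.service` (as the systemd template in the Makefile suggests):

```shell
# Assumes the deb/rpm package installed /lib/systemd/system/unifi-poller.service.
sudo systemctl enable --now unifi-poller
sudo systemctl status unifi-poller
```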
## Development
The UniFi data extraction is provided as an [external library](https://godoc.org/golift.io/unifi),
and you can import that code directly without futzing with this application. That
means if you want to do something like have Telegraf collect your data instead
of UniFi Poller, you can achieve that with a little bit of Go code. You could write
a small app that acts as a Telegraf input plugin, using the [unifi](https://github.com/golift/unifi)
library to grab the data from your controller. As a bonus, all of the code in UniFi Poller is
[in libraries](https://godoc.org/github.com/davidnewhall/unifi-poller/pkg)
and can be used in other projects.
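If you want to poke at the library before writing any Go code, standard Go tooling is enough. A minimal sketch; the module path comes from this repo's go.mod, everything else is generic tooling:

```shell
# Inside your own Go module, pull in the UniFi data-collection library and browse its API.
go get golift.io/unifi
go doc golift.io/unifi
```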
## What's it look like?
Ten dashboards are available in total: five for InfluxDB and five for Prometheus. Below you'll find screenshots of a few.
##### Client Dashboard (InfluxDB)
![UniFi Clients Dashboard Image](https://grafana.com/api/dashboards/10418/images/6660/image)
##### USG Dashboard (InfluxDB)
![USG Dashboard Image](https://grafana.com/api/dashboards/10416/images/6663/image)
##### UAP Dashboard (InfluxDB)
![UAP Dashboard Image](https://grafana.com/api/dashboards/10415/images/6662/image)
##### USW / Switch Dashboard (InfluxDB)
You can drill down into specific sites, switches, and ports. Compare ports in different
sites side-by-side. So easy! This screenshot barely does it justice.
![USW Dashboard Image](https://grafana.com/api/dashboards/10417/images/6664/image)
## Integrations
The following fine folks are providing their services, completely free! These service
integrations are used for things like storage, building, compiling, distribution and
documentation support. This project succeeds because of them. Thank you!
<p style="text-align: center;">
<a title="Jfrog Bintray" alt="Jfrog Bintray" href="https://bintray.com"><img src="https://docs.golift.io/integrations/bintray.png"/></a>
<a title="GitHub" alt="GitHub" href="https://GitHub.com"><img src="https://docs.golift.io/integrations/octocat.png"/></a>
<a title="Docker Cloud" alt="Docker" href="https://cloud.docker.com"><img src="https://docs.golift.io/integrations/docker.png"/></a>
<a title="Travis-CI" alt="Travis-CI" href="https://Travis-CI.com"><img src="https://docs.golift.io/integrations/travis-ci.png"/></a>
<a title="Homebrew" alt="Homebrew" href="https://brew.sh"><img src="https://docs.golift.io/integrations/homebrew.png"/></a>
<a title="Go Lift" alt="Go Lift" href="https://golift.io"><img src="https://docs.golift.io/integrations/golift.png"/></a>
<a title="Grafana" alt="Grafana" href="https://grafana.com"><img src="https://docs.golift.io/integrations/grafana.png"/></a>
</p>
## Copyright & License
<img style="float: right;" align="right" width="200px" src="https://raw.githubusercontent.com/wiki/davidnewhall/unifi-poller/images/unifi-poller-logo.png">
- Copyright © 2016 Garrett Bjerkhoel.
- Copyright © 2018-2019 David Newhall II.
- See [LICENSE](LICENSE) for license information.


@ -5,7 +5,7 @@ import (
"strings"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/unifi-poller/poller"
"golift.io/unifi"
)


@ -1,101 +0,0 @@
unifi-poller(1) -- Utility to poll UniFi Controller Metrics and store them in InfluxDB
===
SYNOPSIS
---
`unifi-poller -c /etc/unifi-poller.conf`
This daemon polls a UniFi controller at a short interval and stores the collected
measurements in an Influx database. The measurements and metrics collected belong
to every available site, device, and client found on the controller, including
UniFi Security Gateways, Access Points, Switches, and possibly more.
Dashboards for Grafana are available.
Find them at [Grafana.com](https://grafana.com/dashboards?search=unifi-poller).
DESCRIPTION
---
UniFi Poller is a small Golang application that runs on Windows, macOS, Linux or
Docker. It polls a UniFi controller every 30 seconds for measurements and stores
the data in an Influx database. See the example configuration file for more
examples and default configurations.
OPTIONS
---
`unifi-poller [-c <config-file>] [-j <filter>] [-h] [-v]`
-c, --config <config-file>
Provide a configuration file (instead of the default).
-v, --version
Display version and exit.
-j, --dumpjson <filter>
This is a debug option; use this when you are missing data in your graphs,
and/or you want to inspect the raw data coming from the controller. The
filter accepts three options: devices, clients, other. This will print a
lot of information. Recommend piping it into a file and/or into jq for
better visualization. This requires a valid config file that contains
working authentication details for a UniFi Controller. This only dumps
data for sites listed in the config file. The application exits after
printing the JSON payload; it does not daemonize or report to InfluxDB
with this option. The `other` filter is special: it allows you to request
any API path, which must be enclosed in quotes along with the word other. Example:
unifi-poller -j "other /stat/admins"
-h, --help
Display usage and exit.
CONFIGURATION
---
* Config File Default Location:
* Linux: `/etc/unifi-poller/up.conf`
* macOS: `/usr/local/etc/unifi-poller/up.conf`
* Windows: `C:\ProgramData\unifi-poller\up.conf`
* Config File Default Format: `TOML`
* Possible formats: `XML`, `JSON`, `TOML`, `YAML`
The config file can be written in four different syntax formats. The application
decides which one to use based on the file's name. If it contains `.xml` it will
be parsed as XML. The same goes for `.json` and `.yaml`. If the filename contains
none of these strings, then it is parsed as the default format, TOML. This option
is provided so the application can be easily adapted to any environment.
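For example, the same settings can be supplied in any supported format just by
pointing the poller at a differently named file (the `.json` path below is illustrative):

    unifi-poller -c /etc/unifi-poller/up.conf    (parsed as TOML)
    unifi-poller -c /etc/unifi-poller/up.json    (parsed as JSON)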
`Config File Parameters`
Configuration file (up.conf) parameters are documented in the wiki.
* [https://github.com/davidnewhall/unifi-poller/wiki/Configuration](https://github.com/davidnewhall/unifi-poller/wiki/Configuration)
`Shell Environment Parameters`
This application can be fully configured using shell environment variables.
Find documentation for this feature on the Docker Wiki page.
* [https://github.com/davidnewhall/unifi-poller/wiki/Docker](https://github.com/davidnewhall/unifi-poller/wiki/Docker)
GO DURATION
---
This application uses Go time durations for the polling interval.
The format is an integer followed by a time unit. You may append
multiple time units to add them together. A few valid time units are:
ms (millisecond)
s (second)
m (minute)
Example Use: `35s`, `1m`, `1m30s`
AUTHOR
---
* Garrett Bjerkhoel (original code) ~ 2016
* David Newhall II (rewritten) ~ 4/20/2018
* David Newhall II (still going) ~ 6/7/2019
LOCATION
---
* UniFi Poller: [https://github.com/davidnewhall/unifi-poller](https://github.com/davidnewhall/unifi-poller)
* UniFi Library: [https://github.com/golift/unifi](https://github.com/golift/unifi)
* Grafana Dashboards: [https://grafana.com/dashboards?search=unifi-poller](https://grafana.com/dashboards?search=unifi-poller)


@ -1,13 +0,0 @@
# Examples
This folder contains example configuration files in four
supported formats. You can use any format you want for
the config file; just give it the appropriate suffix for
the format. An XML file should end with `.xml`, a JSON
file with `.json`, and YAML with `.yaml`. The default
format is always TOML and may have any _other_ suffix.
#### Dashboards
This folder used to contain Grafana Dashboards.
**They are now located at [Grafana.com](https://grafana.com/dashboards?search=unifi-poller).**
Also see [Grafana Dashboards](https://github.com/davidnewhall/unifi-poller/wiki/Grafana-Dashboards) Wiki.


@ -1,104 +0,0 @@
# UniFi Poller primary configuration file. TOML FORMAT #
########################################################
[poller]
# Turns on line numbers, microsecond logging, and a per-device log.
# The default is false, but I personally leave this on at home (four devices).
# This may be noisy if you have a lot of devices. It adds one line per device.
debug = false
# Turns off per-interval logs. Only startup and error logs will be emitted.
# Recommend enabling debug with this setting for better error logging.
quiet = false
# Load dynamic plugins. Advanced use; only a sample MySQL plugin is provided by default.
plugins = []
#### OUTPUTS
# If you don't use an output, you can disable it.
[prometheus]
disable = false
# This controls on which IP and port /metrics is exported when Prometheus is enabled.
# It has no effect otherwise. Must contain a colon and port.
http_listen = "0.0.0.0:9130"
report_errors = false
[influxdb]
disable = false
# InfluxDB does not require auth by default, so the user/password are probably unimportant.
url = "http://127.0.0.1:8086"
user = "unifipoller"
pass = "unifipoller"
# Be sure to create this database.
db = "unifi"
# If your InfluxDB uses a valid SSL cert, set this to true.
verify_ssl = false
# The UniFi Controller only updates traffic stats about every 30 seconds.
# Setting this to something lower may lead to "zeros" in your data.
# If you're getting zeros now, set this to "1m"
interval = "30s"
#### INPUTS
[unifi]
# Setting this to true and providing default credentials allows you to skip
# configuring controllers in this config file. Instead you configure them in
# your prometheus.yml config. Prometheus then sends the controller URL to
# unifi-poller when it performs the scrape. This is useful if you have many
# controllers, or controllers that change often. Most people can leave this off. See the wiki for more.
dynamic = false
# The following section contains the default credentials/configuration for any
# dynamic controller (see above section), or the primary controller if you do not
# provide one and dynamic is disabled. In other words, you can just add your
# controller here and delete the following section. Either works.
[unifi.defaults]
role = "https://127.0.0.1:8443"
url = "https://127.0.0.1:8443"
user = "unifipoller"
pass = "unifipoller"
sites = ["all"]
save_ids = false
save_dpi = false
save_sites = true
verify_ssl = false
# You may repeat the following section to poll additional controllers.
[[unifi.controller]]
# Friendly name used in dashboards. Uses the URL if left empty, which is fine.
# Avoid changing this later because it will live forever in your database.
# Multiple controllers may share a role. This allows grouping during scrapes.
role = ""
url = "https://127.0.0.1:8443"
# Make a read-only user in the UniFi Admin Settings.
user = "unifipoller"
pass = "4BB9345C-2341-48D7-99F5-E01B583FF77F"
# If the controller has more than one site, specify which sites to poll here.
# Set this to ["default"] to poll only the first site on the controller.
# A setting of ["all"] will poll all sites; this works if you only have 1 site too.
sites = ["all"]
# Enable collection of Intrusion Detection System Data (InfluxDB only).
# Only useful if IDS or IPS are enabled on one of the sites.
save_ids = false
# Enable collection of Deep Packet Inspection data. This data breaks down traffic
# types for each client and site, and it powers a dedicated DPI dashboard.
# Enabling this adds roughly 150 data points per client. That's 6000 metrics for
# 40 clients. This adds a little bit of poller run time per interval and causes
# more API requests to your controller(s). Don't let these "cons" sway you:
# it's cool data. Please provide feedback on your experience with this feature.
save_dpi = false
# Enable collection of site data. This data powers the Network Sites dashboard.
# It's not valuable to everyone and setting this to false will save resources.
save_sites = true
# If your UniFi controller has a valid SSL certificate (like Let's Encrypt),
# you can enable this option to validate it. Otherwise, any SSL certificate is
# valid. If you don't know if you have a valid SSL cert, then you don't have one.
verify_ssl = false


@ -1,51 +0,0 @@
{
"poller": {
"debug": false,
"quiet": false,
"plugins": []
},
"prometheus": {
"disable": false,
"http_listen": "0.0.0.0:9130",
"report_errors": false
},
"influxdb": {
"disable": false,
"url": "http://127.0.0.1:8086",
"user": "unifipoller",
"pass": "unifipoller",
"db": "unifi",
"verify_ssl": false,
"interval": "30s"
},
"unifi": {
"dynamic": false,
"defaults": {
"role": "https://127.0.0.1:8443",
"user": "unifipoller",
"pass": "unifipoller",
"url": "https://127.0.0.1:8443",
"sites": ["all"],
"save_ids": false,
"save_dpi": false,
"save_sites": true,
"verify_ssl": false
},
"controllers": [
{
"role": "",
"user": "unifipoller",
"pass": "unifipoller",
"url": "https://127.0.0.1:8443",
"sites": ["all"],
"save_dpi": false,
"save_ids": false,
"save_sites": true,
"verify_ssl": false
}
]
}
}


@ -1,52 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
#######################################################
# UniFi Poller primary configuration file. XML FORMAT #
# provided values are defaults. See up.conf.example! #
#######################################################
<plugin> and <site> are lists of strings and may be repeated.
-->
<poller debug="false" quiet="false">
<!-- plugin></plugin -->
<prometheus disable="false">
<http_listen>0.0.0.0:9130</http_listen>
<report_errors>false</report_errors>
</prometheus>
<influxdb disable="false">
<interval>30s</interval>
<url>http://127.0.0.1:8086</url>
<user>unifipoller</user>
<pass>unifipoller</pass>
<db>unifi</db>
<verify_ssl>false</verify_ssl>
</influxdb>
<unifi dynamic="false">
<default role="https://127.0.0.1:8443">
<site>all</site>
<user>unifipoller</user>
<pass>unifipoller</pass>
<url>https://127.0.0.1:8443</url>
<verify_ssl>false</verify_ssl>
<save_ids>false</save_ids>
<save_dpi>false</save_dpi>
<save_sites>true</save_sites>
</default>
<!-- Repeat this stanza to poll additional controllers. -->
<controller role="">
<site>all</site>
<user>unifipoller</user>
<pass>unifipoller</pass>
<url>https://127.0.0.1:8443</url>
<verify_ssl>false</verify_ssl>
<save_ids>false</save_ids>
<save_dpi>false</save_dpi>
<save_sites>true</save_sites>
</controller>
</unifi>
</poller>


@ -1,52 +0,0 @@
########################################################
# UniFi Poller primary configuration file. YAML FORMAT #
# provided values are defaults. See up.conf.example! #
########################################################
---
poller:
debug: false
quiet: false
plugins: []
prometheus:
disable: false
http_listen: "0.0.0.0:9130"
report_errors: false
influxdb:
disable: false
interval: "30s"
url: "http://127.0.0.1:8086"
user: "unifipoller"
pass: "unifipoller"
db: "unifi"
verify_ssl: false
unifi:
dynamic: false
defaults:
role: "https://127.0.0.1:8443"
user: "unifipoller"
pass: "unifipoller"
url: "https://127.0.0.1:8443"
sites:
- all
verify_ssl: false
save_ids: false
save_dpi: false
save_sites: true
controllers:
# Repeat the following stanza to poll more controllers.
- role: ""
user: "unifipoller"
pass: "unifipoller"
url: "https://127.0.0.1:8443"
sites:
- all
verify_ssl: false
save_ids: false
save_dpi: false
save_sites: true


@ -1,13 +1,8 @@
module github.com/davidnewhall/unifi-poller
module github.com/unifi-poller/inputunifi
go 1.13
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/prometheus/client_golang v1.3.0
github.com/prometheus/common v0.7.0
github.com/spf13/pflag v1.0.5
golift.io/cnfg v0.0.5
github.com/unifi-poller/poller v0.0.1
golift.io/unifi v0.0.400
)


@ -5,11 +5,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -22,40 +19,30 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@ -65,18 +52,18 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/unifi-poller/poller v0.0.1 h1:/SIsahlUEVJ+v9+C94spjV58+MIqR5DucVZqOstj2vM=
github.com/unifi-poller/poller v0.0.1/go.mod h1:sZfDL7wcVwenlkrm/92bsSuoKKUnjj0bwcSUCT+aA2s=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golift.io/cnfg v0.0.5 h1:HnMU8Z9C/igKvir1dqaHx5BPuNGZrp99FCtdJyP2Z4I=
golift.io/cnfg v0.0.5/go.mod h1:ScFDIJg/rJGHbRaed/i7g1lBhywEjB0JiP2uZr3xC3A=


@ -1,51 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF3ozJsBEADKOz87H0/nBgoiY/CXC2PKKFCvxxUEmuub+Xjs2IjvMmFjAXG/
d4JP8ZUfuIL2snYZbaQ8IwsbHoElGEwTXeZeYwJKZpmOua1vd9xASf1NFzGnNlCk
kdgi5CSiNQNphHRUYFVJWD+X+GjMfv2aEpt0FXSx2a95YS2Rqq4fSEfjT6xOgVXQ
JUlusAZ4b22or9gLIYzFc0VCtSQthpgdlMIAitN7t2q+67v3TFyt0U3LO1jNnWGS
FBM83gqCFT5ZITgH8jmVq9mn0odv/R2OTT5QEHBikP+WWjbKHqrFisFOQYza8qro
Gn86SUAqGU0EQvMNk62YPnMD+AWEuDaZx53sJaSgzuEGG0lZYYrSdz0Dk+HIHrPd
IsVn6s/BEHRFuZTLg0h90aSJB4TCK/HKux6hKcPKYySZcRDOxPJjQqUO37iPU2ak
bDkOiuUrW0HcuV5/Sw6n5k8rDKub3l1wkg2Wfsgr8PHl0y5GtfA8kFBpmAQnBXwA
mrfTz6CLf2WzYHfzxVvqOCy8Vo7yB9LpFLq27Z8eeY2wsRdQmUqRGLK7QvZEepQF
QW3JUfseSW8dqpMPOOf0zN7P1UE/fp3wA7BDvTdu+IpMKV2SZvwkvhtCmoiI2dWo
QvmgaKbxWL1NgLqc7xJWntxvTwKv4CLbu5DqHAn6NMOmO0lHuw08QNYl3wARAQAB
tBhHbyBMaWZ0IDxjb2RlQGdvbGlmdC5pbz6JAk4EEwEIADgWIQS5PdZu+Y5U4urg
JboBZq00q8WlfAUCXejMmwIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAB
Zq00q8WlfN/CD/9Rb3WzQK5aFBmIYVxfOxsBeyMKf1zTRTnwM1Y7RqsT0m4NnlcT
jAiE93H6+pibW5T9ujsL3wCGGf69sXo34lv112DJ5hDgR1TaYO5AQWpUKGqq5XNQ
t+R8O50Xro34CTqrxrfCj5YD+zrZaDvr6F69JJSzUtO1MCx5j1Ujn5YF7IammSno
nbufHKpv4jGeKSCtutPpOPrdR9JXdVG7Uo3XIiDn1z2Rv5MtC0uwiPCbAWhu4XbB
su858TBcin5jgWJYjiab7IX7MNcZUgK6aCR/1qUG/rhXqjCz3Vm7XJ+hb5afAASR
AJq8vqscmGgz0K4Ct9dI1OG0BhGs8mBUcRBVqLKAtc061SkM8oeive9JpCcVSyij
6X+YVBESoFWxEO3ACNQ/mWGBIOOTT27Dabob5IOuBLSLJZdVB5tT9Py91JEd08Xi
/O12+zpBcq6XUS/cUOiffDVmfByA3F8YmpgScvgdLxHc39fdaz4YtR7FbgSMJDux
BXdT+GaSFXbYzQV0jUxkeesJr9/ZJPMVm+Q3mD91mTZ6yJ/mJbrsBhTTyx+gyd7O
RusqAYSiTTjRdG6ZzPit8BGoX7s8TIq/dIxb5xnkXgVaaMORHjrpC2Ll9d4olsKs
zyaXcSYZ+HohPI3JNU/Mr6bRnHDAOk7849ranOoWX+eHG+JyET4ko6wlObkCDQRd
6MybARAA1QJ1onzGlXh1HHgMa3wy7WxK7jJ4anPnT+Nt2t4LvTFUq46LL2hgzmvK
zJ5tFDrMUBCyybk1s/+hJow+bRBYIwQDkKuuBXq1LLSk2gheMDNaQJxr55EGeMVL
drXuHQg6mFm2b6JgkEzu2srnIo9qaJMsj3i5O3ZfPgGVUda33r/66Izb3P9kN6xN
wWvLtt+dcPYVxbX8X8d33p9KRw8yYYn0dEmj5rpXrm00oiSEuYj9Y/aPKHwbhrkj
1yRdK9SawQBaTb8umaccpAK4tuhuzx5LOKzlO6D0ZydbCAkRbKshlO7bYVAkSkSI
ldDIMQY0mG4P4A0s/qBjTtFleeg1roJkWDqchhuq6D+M1x4ZM3W4k1kyQPX6b9c6
7v6n+2WPWtqOIahvRLb7zXkonH6TOv3Oopzoj16luSauXwXQhfcJ/8B+rpuEdsdJ
mCsr9UyUHNC6/Dt+Sr82Tkqg74VkCkv00zXb85EYTuXx7AJeiCrNjEG4D8UQUGC7
vyYwAPFAgvhNM/zA8yitflj45bpGcgrXoJ20NmLQLgJKJYuVODmJzn2ylcXQlhNf
P1DwDfzUIeIX04Jg/qbnDseGrmp/jXq0oqQ8LujH+v8KZbBMminlmLIKJmO2TWiM
WfKiNFCD5kQWlqtxZxlxuisRTqp9CrVxGeayxQ1uzX9NhMQjA+EAEQEAAYkCNgQY
AQgAIBYhBLk91m75jlTi6uAlugFmrTSrxaV8BQJd6MybAhsMAAoJEAFmrTSrxaV8
TswP/34pBQmyvyM5zxl7lRpHdptU+Zp9HskjeFGTgJZihRpRu/CzdFTSq2MXpaBW
RLlkVEiOh8txX5bnA3DAFfTyKJ26Cc7WOIPXuGioX7rV5tqWHIQ3FO0QeGpwONli
VGY9cGWMRfe5KfIxcUJY5ckI4c9leAnHjcuM0f/0W4xWg4pofK4zD6jvneUB8IA1
KPHIuzO0EKCFaoedKkW5S3waVc8SaeYTk9R0Dl2tNbK9Q7pIPBt0bH7dwnTt7nCr
tJgS7dpKjRo6xpSfN1j2P0E7bf5oT94wKM8ZTMSWqJtyNgYfDlAs5RUMkrAijdXb
TkADHwWuF5jku0P0tPkGcbOus0UtGR9nxb0gTPzOWQzkvyPczY7JNTT5JP1Md3VW
YYPN2xI/kzaxecMXj3Afbly082H7uaHU3JSFDeb99AHOC5poEZqvV12gHYmWDflM
LsaCSKlmfcShzNm3R0Vnm283zaBK2q4KqvmNsA65+oM/KoN4jqlltH5zGPHnHs0t
ye81ROOUR/6IJvbtXQBoThwFLabXX5Nwu1FE1e0fiPuuHCdwAN/86n9Gnsdn46MM
ZvxBVxdDkr24txKTuKyJytIieQ3gyvVnQZvfS4fI0vd7IsV44YQ8Q8A9pmwpbW56
R1GKXX4MXbnuPJn5bfbsTOxGlMgoT+9Mie4YhW43wc/MkWMW
=Ej9Z
-----END PGP PUBLIC KEY BLOCK-----


@ -1,59 +0,0 @@
#
# This is part of Application Builder.
# https://github.com/golift/application-builder
#
ARG ARCH=amd64
ARG OS=linux
ARG BUILD_DATE=0
ARG COMMIT=0
ARG VERSION=unknown
ARG BINARY=application-builder
FROM golang:stretch as builder
ARG ARCH
ARG OS
ARG BINARY
RUN mkdir -p $GOPATH/pkg/mod $GOPATH/bin $GOPATH/src /${BINARY}
COPY . /${BINARY}
WORKDIR /${BINARY}
RUN go mod vendor \
&& CGO_ENABLED=0 make ${BINARY}.${ARCH}.${OS}
FROM scratch
ARG ARCH
ARG OS
ARG BUILD_DATE
ARG COMMIT
ARG VERSION
ARG LICENSE=MIT
ARG BINARY
ARG SOURCE_URL=http://github.com/golift/application-builder
ARG URL=http://github.com/golift/application-builder
ARG DESC=application-builder
ARG VENDOR=golift
ARG AUTHOR=golift
ARG CONFIG_FILE=config.conf
# Build-time metadata as defined at https://github.com/opencontainers/image-spec/blob/master/annotations.md
LABEL org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.title="${BINARY}" \
org.opencontainers.image.documentation="${SOURCE_URL}/wiki/Docker" \
org.opencontainers.image.description="${DESC}" \
org.opencontainers.image.url="${URL}" \
org.opencontainers.image.revision="${COMMIT}" \
org.opencontainers.image.source="${SOURCE_URL}" \
org.opencontainers.image.vendor="${VENDOR}" \
org.opencontainers.image.authors="${AUTHOR}" \
org.opencontainers.image.architecture="${OS} ${ARCH}" \
org.opencontainers.image.licenses="${LICENSE}" \
org.opencontainers.image.version="${VERSION}"
COPY --from=builder /${BINARY}/${BINARY}.${ARCH}.${OS} /image
COPY --from=builder /${BINARY}/examples/${CONFIG_FILE}.example /etc/${BINARY}/${CONFIG_FILE}
COPY --from=builder /etc/ssl /etc/ssl
VOLUME [ "/etc/${BINARY}" ]
ENTRYPOINT [ "/image" ]

View File

@ -1,17 +0,0 @@
## Docker Cloud Builds
This folder contains the files that build our Docker image. The image
is built by Docker Hub "automatically" using the [Dockerfile](Dockerfile)
and [hooks/](hooks/) in this folder.
## Docker Compose
The other files in this folder can be used locally to spin up
a full set of applications (minus the UniFi controller) to get
UniFi Poller up and running, including InfluxDB, Grafana, and
Chronograf. The last app is useful for inspecting the data
UniFi Poller stores in InfluxDB.
##### HOWTO
**Learn more about how and when to use these *Docker Compose* files in the
[Docker Wiki](https://github.com/davidnewhall/unifi-poller/wiki/Docker).**

View File

@ -1,12 +0,0 @@
INFLUXDB_USERNAME=admin
INFLUXDB_PASSWORD=admin
INFLUXDB_DATABASE=unifi
GRAFANA_USERNAME=admin
GRAFANA_PASSWORD=admin
UP_BRANCH=stable
UP_UNIFI_USER=influx
UP_UNIFI_PASS=
UP_UNIFI_URL=https://127.0.0.1:8443
UP_DEBUG=false

View File

@ -1,55 +0,0 @@
version: '2'
services:
influxdb:
restart: always
image: influxdb:latest
ports:
- '8086:8086'
volumes:
- influxdb-storage:/var/lib/influxdb
environment:
- INFLUXDB_DB=${INFLUXDB_DATABASE}
- INFLUXDB_ADMIN_USER=${INFLUXDB_USERNAME}
- INFLUXDB_ADMIN_PASSWORD=${INFLUXDB_PASSWORD}
chronograf:
image: chronograf:latest
restart: always
ports:
- '127.0.0.1:8888:8888'
volumes:
- chronograf-storage:/var/lib/chronograf
depends_on:
- influxdb
environment:
- INFLUXDB_URL=http://influxdb:8086
- INFLUXDB_USERNAME=${INFLUXDB_USERNAME}
- INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
grafana:
image: grafana/grafana:latest
restart: always
ports:
- '3000:3000'
volumes:
- grafana-storage:/var/lib/grafana
depends_on:
- influxdb
environment:
- GF_SECURITY_ADMIN_USER=${GRAFANA_USERNAME}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
- GF_INSTALL_PLUGINS=grafana-clock-panel,natel-discrete-panel,grafana-piechart-panel
unifi-poller:
restart: always
image: golift/unifi-poller:${UP_BRANCH}
environment:
- UP_INFLUX_DB=${INFLUXDB_DATABASE}
- UP_INFLUX_USER=${INFLUXDB_USERNAME}
- UP_INFLUX_PASS=${INFLUXDB_PASSWORD}
- UP_INFLUX_URL=http://influxdb:8086
- UP_UNIFI_USER=${UP_UNIFI_USER}
- UP_UNIFI_PASS=${UP_UNIFI_PASS}
- UP_UNIFI_URL=${UP_UNIFI_URL}
- UP_DEBUG=${UP_DEBUG}
volumes:
influxdb-storage:
chronograf-storage:
grafana-storage:

View File

@ -1,28 +0,0 @@
# Application Builder
[https://github.com/golift/application-builder](https://github.com/golift/application-builder)
## Docker Build Hooks
The files in this folder are used by Docker Cloud to automate image builds.
Do not edit these files.
If you want to build, maintain and push multi-architecture Docker images, you may
follow the example provided here. All of the hooks are generic, and will work with
any build. Two environment variables must be passed in from Docker Cloud config.
1. `BUILDS` must be set to the builds you're trying to perform. This repo is currently set to:
- `linux:armhf:arm: linux:arm64:arm64:armv8 linux:amd64:amd64: linux:i386:386:`
- The format is `os:name:arch:variant`.
- `os` and `name` are passed into the Dockerfile.
- `os`, `arch` and `variant` are passed into `docker manifest annotate`.
- This does not yet work with an OS other than `linux`.
1. Set `DOCKER_CLI_EXPERIMENTAL` to `enabled`. Not optional.
Keep the build simple; see screenshot. This only supports one build tag, but it creates many more.
![UniFi Poller Docker Cloud Build Rules](https://raw.githubusercontent.com/wiki/davidnewhall/unifi-poller/images/unifi-poller-build-rules.png "UniFi Poller Docker Cloud Build Rules")
The fancy source tag is `/^v((\d+\.\d+)(?:\.\d+)?)$/` and it allows you to capture
the minor version without patch-level in `{\2}`. I no longer use `{\2}` in my build.
[See how it works here](https://regex101.com/r/fzt6ki/1).

View File

@ -1,37 +0,0 @@
#!/bin/bash
# The Docker Cloud config must pass in the BUILDS env variable.
# See README.md (in this dir) and the screenshot for more info.
# This is part of Application Builder.
# https://github.com/golift/application-builder
set -e -o pipefail
# This always runs local to the Dockerfile folder, so the path is ../..
pushd ../..
source .metadata.sh
# Build each configured image from Docker Cloud.
for build in $BUILDS; do
# os:name:arch:variant
os=$(echo $build | cut -d: -f1)
name=$(echo $build | cut -d: -f2)
echo "Building Image ${IMAGE_NAME}_${os}_${name}"
docker build \
--build-arg "ARCH=${name}" \
--build-arg "BUILD_DATE=${DATE}" \
--build-arg "COMMIT=${COMMIT}" \
--build-arg "VERSION=${VERSION}-${ITERATION}" \
--build-arg "LICENSE=${LICENSE}" \
--build-arg "DESC=${DESC}" \
--build-arg "URL=${URL}" \
--build-arg "VENDOR=${VENDOR}" \
--build-arg "AUTHOR=${MAINT}" \
--build-arg "BINARY=${BINARY}" \
--build-arg "SOURCE_URL=${SOURCE_URL}" \
--build-arg "CONFIG_FILE=${CONFIG_FILE}" \
--tag "${IMAGE_NAME}_${os}_${name}" \
--file ${DOCKERFILE_PATH} .
done
popd

View File

@ -1,14 +0,0 @@
#!/bin/bash
# This is part of Application Builder.
# https://github.com/golift/application-builder
# https://www.smockle.com/blog/2019/04/22/migrating-from-travis-ci-to-docker-hub-automated-builds/
# This upgrades the docker client on the Docker Cloud server to a version
# that contains the `docker manifest` command. To use `docker manifest`
# set `DOCKER_CLI_EXPERIMENTAL=enabled` in your build environment.
# See README.md (in this dir) and the screenshot for more info.
apt-get -y update
apt-get -y --only-upgrade install docker-ee
docker run --rm --privileged multiarch/qemu-user-static:register --reset

View File

@ -1,45 +0,0 @@
#!/bin/bash
# This post build hook creates multi-architecture docker manifests.
# It's all a bit complicated for some reason.
# This is part of Application Builder.
# https://github.com/golift/application-builder
set -e -o pipefail
pushd ../..
source .metadata.sh
popd
if [ "$BUILDS" != "" ]; then
TAGS=$DOCKER_TAG
fi
# Push the extra custom images that were created.
for build in $BUILDS; do
os=$(echo $build | cut -d: -f1)
name=$(echo $build | cut -d: -f2)
echo "Pushing Image ${IMAGE_NAME}_${os}_${name}"
docker push ${IMAGE_NAME}_${os}_${name}
IMAGES="${IMAGES} ${IMAGE_NAME}_${os}_${name}"
done
echo "Annotating Images: ${IMAGES}"
# Build all the Docker tags if the source branch is a release and not a branch.
[ "v$VERSION" != "$SOURCE_BRANCH" ] || TAGS="latest $VERSION $SHORTVER stable"
echo "Version: $VERSION, Source: $SOURCE_BRANCH, Building tags: ${TAGS}"
# Create multi-architecture manifests for each tag with all the built images.
for tag in $TAGS; do
docker manifest create --amend ${DOCKER_REPO}:${tag} $IMAGES
for build in $BUILDS; do
# os:name:arch:variant, ie linux:amd64:amd64: (no variant is ok)
os=$(echo $build | cut -d: -f1)
name=$(echo $build | cut -d: -f2)
arch=$(echo $build | cut -d: -f3)
vari=$(echo $build | cut -d: -f4)
# Annotating updates the manifest to describe each image's capabilities.
docker manifest annotate ${DOCKER_REPO}:${tag} ${IMAGE_NAME}_${os}_${name} --os ${os} --arch ${arch} --variant "${vari}"
done
echo "Pushing Manifest ${DOCKER_REPO}:${tag}"
docker manifest push ${DOCKER_REPO}:${tag}
done

View File

@ -1,68 +0,0 @@
# Homebrew Formula Template. Built by Makefile: `make formula`
# This is part of Application Builder.
# https://github.com/golift/application-builder
class {{Class}} < Formula
desc "{{Desc}}"
homepage "{{URL}}"
url "{{SOURCE_PATH}}"
sha256 "{{SHA256}}"
head "{{SOURCE_URL}}"
depends_on "go" => :build
depends_on "dep"
def install
bin_path = buildpath/"#{name}"
# Copy all files from their current location to buildpath/#{name}
bin_path.install Dir["*",".??*"]
cd bin_path do
system "make" "vendor"
system "make", "install", "VERSION=#{version}", "ITERATION={{Iter}}", "PREFIX=#{prefix}", "ETC=#{etc}"
# If this fails, the user gets a nice big warning about write permissions on their
# #{var}/log folder. The alternative could be letting the app silently fail
# to start when it cannot write logs. This is better. Fix perms; reinstall.
touch("#{var}/log/#{name}.log")
end
end
def caveats
<<-EOS
Edit the config file at #{etc}/#{name}/{{CONFIG_FILE}} then start #{name} with
brew services start #{name} ~ log file: #{var}/log/#{name}.log
The manual explains the config file options: man #{name}
EOS
end
plist_options :startup => false
def plist
<<-EOS
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>#{plist_name}</string>
<key>ProgramArguments</key>
<array>
<string>#{bin}/#{name}</string>
<string>--config</string>
<string>#{etc}/#{name}/{{CONFIG_FILE}}</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardErrorPath</key>
<string>#{var}/log/#{name}.log</string>
<key>StandardOutPath</key>
<string>#{var}/log/#{name}.log</string>
</dict>
</plist>
EOS
end
test do
assert_match "#{name} v#{version}", shell_output("#{bin}/#{name} -v 2>&1", 2)
end
end

View File

@ -1,2 +0,0 @@
This file isn't used by the build or for any packages. The homebrew launchd is
in the [homebrew](../homebrew) folder. This file is for reference only.

View File

@ -1,22 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.github.davidnewhall.unifi-poller</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/unifi-poller</string>
<string>-c</string>
<string>/usr/local/etc/unifi-poller/up.conf</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardErrorPath</key>
<string>/usr/local/var/log/unifi-poller.log</string>
<key>StandardOutPath</key>
<string>/usr/local/var/log/unifi-poller.log</string>
</dict>
</plist>

View File

@ -1,23 +0,0 @@
# Systemd service unit for {{BINARY}}.
# This is part of Application Builder.
# https://github.com/golift/application-builder
[Unit]
Description={{BINARY}} - {{DESC}}
After=network.target
Requires=network.target
[Service]
ExecStart=/usr/bin/{{BINARY}} $DAEMON_OPTS
EnvironmentFile=-/etc/default/{{BINARY}}
EnvironmentFile=-/etc/sysconfig/{{BINARY}}
Restart=always
RestartSec=10
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier={{BINARY}}
Type=simple
User=nobody
[Install]
WantedBy=multi-user.target

View File

@ -9,7 +9,7 @@ import (
"sync"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/unifi-poller/poller"
"golift.io/unifi"
)

View File

@ -7,7 +7,7 @@ import (
"os"
"strings"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/unifi-poller/poller"
"golift.io/unifi"
)

View File

@ -1,19 +0,0 @@
package main
import (
"log"
"github.com/davidnewhall/unifi-poller/pkg/poller"
// Load input plugins!
_ "github.com/davidnewhall/unifi-poller/pkg/inputunifi"
// Load output plugins!
_ "github.com/davidnewhall/unifi-poller/pkg/influxunifi"
_ "github.com/davidnewhall/unifi-poller/pkg/promunifi"
)
// Keep it simple.
func main() {
if err := poller.New().Start(); err != nil {
log.Fatalln("[ERROR]", err)
}
}

View File

@ -1,4 +0,0 @@
# influx
This package provides the methods to turn UniFi measurements into influx
data-points with appropriate tags and fields.

View File

@ -1,95 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchClient generates Unifi Client datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchClient(r report, s *unifi.Client) {
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"ap_name": s.ApName,
"gw_name": s.GwName,
"sw_name": s.SwName,
"oui": s.Oui,
"radio_name": s.RadioName,
"radio": s.Radio,
"radio_proto": s.RadioProto,
"name": s.Name,
"fixed_ip": s.FixedIP,
"sw_port": s.SwPort.Txt,
"os_class": s.OsClass.Txt,
"os_name": s.OsName.Txt,
"dev_cat": s.DevCat.Txt,
"dev_id": s.DevID.Txt,
"dev_vendor": s.DevVendor.Txt,
"dev_family": s.DevFamily.Txt,
"is_wired": s.IsWired.Txt,
"is_guest": s.IsGuest.Txt,
"use_fixedip": s.UseFixedIP.Txt,
"channel": s.Channel.Txt,
"vlan": s.Vlan.Txt,
}
fields := map[string]interface{}{
"anomalies": s.Anomalies,
"ip": s.IP,
"essid": s.Essid,
"bssid": s.Bssid,
"channel": s.Channel.Val,
"hostname": s.Name,
"radio_desc": s.RadioDescription,
"satisfaction": s.Satisfaction.Val,
"bytes_r": s.BytesR,
"ccq": s.Ccq,
"noise": s.Noise,
"note": s.Note,
"roam_count": s.RoamCount,
"rssi": s.Rssi,
"rx_bytes": s.RxBytes,
"rx_bytes_r": s.RxBytesR,
"rx_packets": s.RxPackets,
"rx_rate": s.RxRate,
"signal": s.Signal,
"tx_bytes": s.TxBytes,
"tx_bytes_r": s.TxBytesR,
"tx_packets": s.TxPackets,
"tx_retries": s.TxRetries,
"tx_power": s.TxPower,
"tx_rate": s.TxRate,
"uptime": s.Uptime,
"wifi_tx_attempts": s.WifiTxAttempts,
"wired-rx_bytes": s.WiredRxBytes,
"wired-rx_bytes-r": s.WiredRxBytesR,
"wired-rx_packets": s.WiredRxPackets,
"wired-tx_bytes": s.WiredTxBytes,
"wired-tx_bytes-r": s.WiredTxBytesR,
"wired-tx_packets": s.WiredTxPackets,
}
r.send(&metric{Table: "clients", Tags: tags, Fields: fields})
}
func (u *InfluxUnifi) batchClientDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
r.send(&metric{
Table: "clientdpi",
Tags: map[string]string{
"category": unifi.DPICats.Get(dpi.Cat),
"application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
"name": s.Name,
"mac": s.MAC,
"site_name": s.SiteName,
"source": s.SourceName,
},
Fields: map[string]interface{}{
"tx_packets": dpi.TxPackets,
"rx_packets": dpi.RxPackets,
"tx_bytes": dpi.TxBytes,
"rx_bytes": dpi.RxBytes,
}},
)
}
}

View File

@ -1,42 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchIDS generates intrusion detection datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchIDS(r report, i *unifi.IDS) {
tags := map[string]string{
"site_name": i.SiteName,
"source": i.SourceName,
"in_iface": i.InIface,
"event_type": i.EventType,
"proto": i.Proto,
"app_proto": i.AppProto,
"usgip": i.Usgip,
"country_code": i.SrcipGeo.CountryCode,
"country_name": i.SrcipGeo.CountryName,
"region": i.SrcipGeo.Region,
"city": i.SrcipGeo.City,
"postal_code": i.SrcipGeo.PostalCode,
"srcipASN": i.SrcipASN,
"usgipASN": i.UsgipASN,
"alert_category": i.InnerAlertCategory,
"subsystem": i.Subsystem,
"catname": i.Catname,
}
fields := map[string]interface{}{
"event_type": i.EventType,
"proto": i.Proto,
"app_proto": i.AppProto,
"usgip": i.Usgip,
"country_name": i.SrcipGeo.CountryName,
"city": i.SrcipGeo.City,
"postal_code": i.SrcipGeo.PostalCode,
"srcipASN": i.SrcipASN,
"usgipASN": i.UsgipASN,
}
r.send(&metric{Table: "intrusion_detect", Tags: tags, Fields: fields})
}

View File

@ -1,295 +0,0 @@
// Package influxunifi provides the methods to turn UniFi measurements into influx
// data-points with appropriate tags and fields.
package influxunifi
import (
"crypto/tls"
"fmt"
"log"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
influx "github.com/influxdata/influxdb1-client/v2"
"golift.io/cnfg"
)
const (
defaultInterval = 30 * time.Second
minimumInterval = 10 * time.Second
defaultInfluxDB = "unifi"
defaultInfluxUser = "unifipoller"
defaultInfluxURL = "http://127.0.0.1:8086"
)
// Config defines the data needed to store metrics in InfluxDB
type Config struct {
Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
URL string `json:"url,omitempty" toml:"url,omitempty" xml:"url" yaml:"url"`
User string `json:"user,omitempty" toml:"user,omitempty" xml:"user" yaml:"user"`
Pass string `json:"pass,omitempty" toml:"pass,omitempty" xml:"pass" yaml:"pass"`
DB string `json:"db,omitempty" toml:"db,omitempty" xml:"db" yaml:"db"`
}
// InfluxDB allows the data to be nested in the config file.
type InfluxDB struct {
Config Config `json:"influxdb" toml:"influxdb" xml:"influxdb" yaml:"influxdb"`
}
// InfluxUnifi is returned by New() after you provide a Config.
type InfluxUnifi struct {
Collector poller.Collect
influx influx.Client
LastCheck time.Time
*InfluxDB
}
type metric struct {
Table string
Tags map[string]string
Fields map[string]interface{}
}
func init() {
u := &InfluxUnifi{InfluxDB: &InfluxDB{}, LastCheck: time.Now()}
poller.NewOutput(&poller.Output{
Name: "influxdb",
Config: u.InfluxDB,
Method: u.Run,
})
}
// PollController runs forever, polling UniFi and pushing to InfluxDB
// This is started by Run() or RunBoth() after everything checks out.
func (u *InfluxUnifi) PollController() {
interval := u.Config.Interval.Round(time.Second)
ticker := time.NewTicker(interval)
log.Printf("[INFO] Everything checks out! Poller started, InfluxDB interval: %v", interval)
for u.LastCheck = range ticker.C {
metrics, ok, err := u.Collector.Metrics()
if err != nil {
u.Collector.LogErrorf("%v", err)
if !ok {
continue
}
}
report, err := u.ReportMetrics(metrics)
if err != nil {
// XXX: reset and re-auth? not sure..
u.Collector.LogErrorf("%v", err)
continue
}
u.LogInfluxReport(report)
}
}
// Run runs a ticker to poll the unifi server and update influxdb.
func (u *InfluxUnifi) Run(c poller.Collect) error {
var err error
if u.Config.Disable {
return nil
}
u.Collector = c
u.setConfigDefaults()
u.influx, err = influx.NewHTTPClient(influx.HTTPConfig{
Addr: u.Config.URL,
Username: u.Config.User,
Password: u.Config.Pass,
TLSConfig: &tls.Config{InsecureSkipVerify: !u.Config.VerifySSL},
})
if err != nil {
return err
}
u.PollController()
return nil
}
func (u *InfluxUnifi) setConfigDefaults() {
if u.Config.URL == "" {
u.Config.URL = defaultInfluxURL
}
if u.Config.User == "" {
u.Config.User = defaultInfluxUser
}
if u.Config.Pass == "" {
u.Config.Pass = defaultInfluxUser
}
if u.Config.DB == "" {
u.Config.DB = defaultInfluxDB
}
if u.Config.Interval.Duration == 0 {
u.Config.Interval = cnfg.Duration{Duration: defaultInterval}
} else if u.Config.Interval.Duration < minimumInterval {
u.Config.Interval = cnfg.Duration{Duration: minimumInterval}
}
u.Config.Interval = cnfg.Duration{Duration: u.Config.Interval.Duration.Round(time.Second)}
}
// ReportMetrics batches all device and client data into influxdb data points.
// Call this after you've collected all the data you care about.
// Returns an error if influxdb calls fail, otherwise returns a report.
func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics) (*Report, error) {
r := &Report{Metrics: m, ch: make(chan *metric), Start: time.Now()}
defer close(r.ch)
var err error
// Make a new Influx Points Batcher.
r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.Config.DB})
if err != nil {
return nil, fmt.Errorf("influx.NewBatchPoints: %v", err)
}
go u.collect(r, r.ch)
// Batch all the points.
u.loopPoints(r)
r.wg.Wait() // wait for all points to finish batching!
// Send all the points.
if err = u.influx.Write(r.bp); err != nil {
return nil, fmt.Errorf("influxdb.Write(points): %v", err)
}
r.Elapsed = time.Since(r.Start)
return r, nil
}
// collect runs in a go routine and batches all the points.
func (u *InfluxUnifi) collect(r report, ch chan *metric) {
for m := range ch {
pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, r.metrics().TS)
if err != nil {
r.error(err)
} else {
r.batch(m, pt)
}
r.done()
}
}
// loopPoints kicks off 5 or 9 goroutines to process metrics and send them
// to the collect routine through the metric channel.
func (u *InfluxUnifi) loopPoints(r report) {
m := r.metrics()
r.add()
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.SitesDPI {
u.batchSiteDPI(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.Sites {
u.batchSite(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.ClientsDPI {
u.batchClientDPI(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.Clients {
u.batchClient(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.IDSList {
u.batchIDS(r, s)
}
}()
u.loopDevicePoints(r)
}
func (u *InfluxUnifi) loopDevicePoints(r report) {
m := r.metrics()
if m.Devices == nil {
return
}
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.UAPs {
u.batchUAP(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.USGs {
u.batchUSG(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.USWs {
u.batchUSW(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.UDMs {
u.batchUDM(r, s)
}
}()
}
// LogInfluxReport writes a log message after exporting to influxdb.
func (u *InfluxUnifi) LogInfluxReport(r *Report) {
idsMsg := fmt.Sprintf("IDS Events: %d, ", len(r.Metrics.IDSList))
u.Collector.Logf("UniFi Metrics Recorded. Sites: %d, Clients: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, %sPoints: %d, Fields: %d, Errs: %d, Elapsed: %v",
len(r.Metrics.Sites), len(r.Metrics.Clients), len(r.Metrics.UAPs),
len(r.Metrics.UDMs)+len(r.Metrics.USGs), len(r.Metrics.USWs), idsMsg, r.Total,
r.Fields, len(r.Errors), r.Elapsed.Round(time.Millisecond))
}

View File

@ -1,64 +0,0 @@
package influxunifi
import (
"sync"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
influx "github.com/influxdata/influxdb1-client/v2"
)
// Report is returned to the calling procedure after everything is processed.
type Report struct {
Metrics *poller.Metrics
Errors []error
Total int
Fields int
Start time.Time
Elapsed time.Duration
ch chan *metric
wg sync.WaitGroup
bp influx.BatchPoints
}
// report is an internal interface that can be mocked and overridden for tests.
type report interface {
add()
done()
send(m *metric)
error(err error)
batch(m *metric, pt *influx.Point)
metrics() *poller.Metrics
}
func (r *Report) metrics() *poller.Metrics {
return r.Metrics
}
// satisfy gomnd
const one = 1
func (r *Report) add() {
r.wg.Add(one)
}
func (r *Report) done() {
r.wg.Add(-one)
}
func (r *Report) send(m *metric) {
r.wg.Add(one)
r.ch <- m
}
/* The following methods are not thread safe. */
func (r *Report) error(err error) {
r.Errors = append(r.Errors, err)
}
func (r *Report) batch(m *metric, p *influx.Point) {
r.Total++
r.Fields += len(m.Fields)
r.bp.AddPoint(p)
}

View File

@ -1,78 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchSite generates Unifi Sites' datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchSite(r report, s *unifi.Site) {
for _, h := range s.Health {
tags := map[string]string{
"name": s.Name,
"site_name": s.SiteName,
"source": s.SourceName,
"desc": s.Desc,
"status": h.Status,
"subsystem": h.Subsystem,
"wan_ip": h.WanIP,
"gw_name": h.GwName,
"lan_ip": h.LanIP,
}
fields := map[string]interface{}{
"num_user": h.NumUser.Val,
"num_guest": h.NumGuest.Val,
"num_iot": h.NumIot.Val,
"tx_bytes-r": h.TxBytesR.Val,
"rx_bytes-r": h.RxBytesR.Val,
"num_ap": h.NumAp.Val,
"num_adopted": h.NumAdopted.Val,
"num_disabled": h.NumDisabled.Val,
"num_disconnected": h.NumDisconnected.Val,
"num_pending": h.NumPending.Val,
"num_gw": h.NumGw.Val,
"wan_ip": h.WanIP,
"num_sta": h.NumSta.Val,
"gw_cpu": h.GwSystemStats.CPU.Val,
"gw_mem": h.GwSystemStats.Mem.Val,
"gw_uptime": h.GwSystemStats.Uptime.Val,
"latency": h.Latency.Val,
"uptime": h.Uptime.Val,
"drops": h.Drops.Val,
"xput_up": h.XputUp.Val,
"xput_down": h.XputDown.Val,
"speedtest_ping": h.SpeedtestPing.Val,
"speedtest_lastrun": h.SpeedtestLastrun.Val,
"num_sw": h.NumSw.Val,
"remote_user_num_active": h.RemoteUserNumActive.Val,
"remote_user_num_inactive": h.RemoteUserNumInactive.Val,
"remote_user_rx_bytes": h.RemoteUserRxBytes.Val,
"remote_user_tx_bytes": h.RemoteUserTxBytes.Val,
"remote_user_rx_packets": h.RemoteUserRxPackets.Val,
"remote_user_tx_packets": h.RemoteUserTxPackets.Val,
"num_new_alarms": s.NumNewAlarms.Val,
}
r.send(&metric{Table: "subsystems", Tags: tags, Fields: fields})
}
}
func (u *InfluxUnifi) batchSiteDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
r.send(&metric{
Table: "sitedpi",
Tags: map[string]string{
"category": unifi.DPICats.Get(dpi.Cat),
"application": unifi.DPIApps.GetApp(dpi.Cat, dpi.App),
"site_name": s.SiteName,
"source": s.SourceName,
},
Fields: map[string]interface{}{
"tx_packets": dpi.TxPackets,
"rx_packets": dpi.RxPackets,
"tx_bytes": dpi.TxBytes,
"rx_bytes": dpi.RxBytes,
}},
)
}
}

View File

@ -1,189 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchUAP generates Wireless-Access-Point datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUAP(r report, s *unifi.UAP) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields := Combine(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
fields["ip"] = s.IP
fields["bytes"] = s.Bytes.Val
fields["last_seen"] = s.LastSeen.Val
fields["rx_bytes"] = s.RxBytes.Val
fields["tx_bytes"] = s.TxBytes.Val
fields["uptime"] = s.Uptime.Val
fields["state"] = s.State
fields["user-num_sta"] = int(s.UserNumSta.Val)
fields["guest-num_sta"] = int(s.GuestNumSta.Val)
fields["num_sta"] = s.NumSta.Val
r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
u.processRadTable(r, tags, s.RadioTable, s.RadioTableStats)
u.processVAPTable(r, tags, s.VapTable)
}
func (u *InfluxUnifi) processUAPstats(ap *unifi.Ap) map[string]interface{} {
if ap == nil {
return map[string]interface{}{}
}
// Accumulative Statistics.
return map[string]interface{}{
"stat_user-rx_packets": ap.UserRxPackets.Val,
"stat_guest-rx_packets": ap.GuestRxPackets.Val,
"stat_rx_packets": ap.RxPackets.Val,
"stat_user-rx_bytes": ap.UserRxBytes.Val,
"stat_guest-rx_bytes": ap.GuestRxBytes.Val,
"stat_rx_bytes": ap.RxBytes.Val,
"stat_user-rx_errors": ap.UserRxErrors.Val,
"stat_guest-rx_errors": ap.GuestRxErrors.Val,
"stat_rx_errors": ap.RxErrors.Val,
"stat_user-rx_dropped": ap.UserRxDropped.Val,
"stat_guest-rx_dropped": ap.GuestRxDropped.Val,
"stat_rx_dropped": ap.RxDropped.Val,
"stat_user-rx_crypts": ap.UserRxCrypts.Val,
"stat_guest-rx_crypts": ap.GuestRxCrypts.Val,
"stat_rx_crypts": ap.RxCrypts.Val,
"stat_user-rx_frags": ap.UserRxFrags.Val,
"stat_guest-rx_frags": ap.GuestRxFrags.Val,
"stat_rx_frags": ap.RxFrags.Val,
"stat_user-tx_packets": ap.UserTxPackets.Val,
"stat_guest-tx_packets": ap.GuestTxPackets.Val,
"stat_tx_packets": ap.TxPackets.Val,
"stat_user-tx_bytes": ap.UserTxBytes.Val,
"stat_guest-tx_bytes": ap.GuestTxBytes.Val,
"stat_tx_bytes": ap.TxBytes.Val,
"stat_user-tx_errors": ap.UserTxErrors.Val,
"stat_guest-tx_errors": ap.GuestTxErrors.Val,
"stat_tx_errors": ap.TxErrors.Val,
"stat_user-tx_dropped": ap.UserTxDropped.Val,
"stat_guest-tx_dropped": ap.GuestTxDropped.Val,
"stat_tx_dropped": ap.TxDropped.Val,
"stat_user-tx_retries": ap.UserTxRetries.Val,
"stat_guest-tx_retries": ap.GuestTxRetries.Val,
}
}
// processVAPTable creates points for Wifi Radios. This works with several types of UAP-capable devices.
func (u *InfluxUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) {
for _, s := range vt {
tags := map[string]string{
"device_name": t["name"],
"site_name": t["site_name"],
"source": t["source"],
"ap_mac": s.ApMac,
"bssid": s.Bssid,
"id": s.ID,
"name": s.Name,
"radio_name": s.RadioName,
"radio": s.Radio,
"essid": s.Essid,
"site_id": s.SiteID,
"usage": s.Usage,
"state": s.State,
"is_guest": s.IsGuest.Txt,
}
fields := map[string]interface{}{
"ccq": s.Ccq,
"mac_filter_rejections": s.MacFilterRejections,
"num_satisfaction_sta": s.NumSatisfactionSta.Val,
"avg_client_signal": s.AvgClientSignal.Val,
"satisfaction": s.Satisfaction.Val,
"satisfaction_now": s.SatisfactionNow.Val,
"num_sta": s.NumSta,
"channel": s.Channel.Val,
"rx_bytes": s.RxBytes.Val,
"rx_crypts": s.RxCrypts.Val,
"rx_dropped": s.RxDropped.Val,
"rx_errors": s.RxErrors.Val,
"rx_frags": s.RxFrags.Val,
"rx_nwids": s.RxNwids.Val,
"rx_packets": s.RxPackets.Val,
"tx_bytes": s.TxBytes.Val,
"tx_dropped": s.TxDropped.Val,
"tx_errors": s.TxErrors.Val,
"tx_packets": s.TxPackets.Val,
"tx_power": s.TxPower.Val,
"tx_retries": s.TxRetries.Val,
"tx_combined_retries": s.TxCombinedRetries.Val,
"tx_data_mpdu_bytes": s.TxDataMpduBytes.Val,
"tx_rts_retries": s.TxRtsRetries.Val,
"tx_success": s.TxSuccess.Val,
"tx_total": s.TxTotal.Val,
"tx_tcp_goodbytes": s.TxTCPStats.Goodbytes.Val,
"tx_tcp_lat_avg": s.TxTCPStats.LatAvg.Val,
"tx_tcp_lat_max": s.TxTCPStats.LatMax.Val,
"tx_tcp_lat_min": s.TxTCPStats.LatMin.Val,
"rx_tcp_goodbytes": s.RxTCPStats.Goodbytes.Val,
"rx_tcp_lat_avg": s.RxTCPStats.LatAvg.Val,
"rx_tcp_lat_max": s.RxTCPStats.LatMax.Val,
"rx_tcp_lat_min": s.RxTCPStats.LatMin.Val,
"wifi_tx_latency_mov_avg": s.WifiTxLatencyMov.Avg.Val,
"wifi_tx_latency_mov_max": s.WifiTxLatencyMov.Max.Val,
"wifi_tx_latency_mov_min": s.WifiTxLatencyMov.Min.Val,
"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
"wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
}
r.send(&metric{Table: "uap_vaps", Tags: tags, Fields: fields})
}
}
func (u *InfluxUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
for _, p := range rt {
tags := map[string]string{
"device_name": t["name"],
"site_name": t["site_name"],
"source": t["source"],
"channel": p.Channel.Txt,
"radio": p.Radio,
}
fields := map[string]interface{}{
"current_antenna_gain": p.CurrentAntennaGain.Val,
"ht": p.Ht.Txt,
"max_txpower": p.MaxTxpower.Val,
"min_txpower": p.MinTxpower.Val,
"nss": p.Nss.Val,
"radio_caps": p.RadioCaps.Val,
}
for _, t := range rts {
if t.Name == p.Name {
fields["ast_be_xmit"] = t.AstBeXmit.Val
fields["channel"] = t.Channel.Val
fields["cu_self_rx"] = t.CuSelfRx.Val
fields["cu_self_tx"] = t.CuSelfTx.Val
fields["cu_total"] = t.CuTotal.Val
fields["extchannel"] = t.Extchannel.Val
fields["gain"] = t.Gain.Val
fields["guest-num_sta"] = t.GuestNumSta.Val
fields["num_sta"] = t.NumSta.Val
fields["radio"] = t.Radio
fields["tx_packets"] = t.TxPackets.Val
fields["tx_power"] = t.TxPower.Val
fields["tx_retries"] = t.TxRetries.Val
fields["user-num_sta"] = t.UserNumSta.Val
break
}
}
r.send(&metric{Table: "uap_radios", Tags: tags, Fields: fields})
}
}

View File

@ -1,133 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// Combine merges N maps into one. Later maps overwrite duplicate keys, so use with caution.
func Combine(in ...map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{})
for i := range in {
for k := range in[i] {
out[k] = in[i][k]
}
}
return out
}
// batchSysStats is used by all device types.
func (u *InfluxUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]interface{} {
return map[string]interface{}{
"loadavg_1": s.Loadavg1.Val,
"loadavg_5": s.Loadavg5.Val,
"loadavg_15": s.Loadavg15.Val,
"mem_used": s.MemUsed.Val,
"mem_buffer": s.MemBuffer.Val,
"mem_total": s.MemTotal.Val,
"cpu": ss.CPU.Val,
"mem": ss.Mem.Val,
"system_uptime": ss.Uptime.Val,
}
}
// batchUDM generates Unifi Gateway datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUDM(r report, s *unifi.UDM) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"source": s.SourceName,
"mac": s.Mac,
"site_name": s.SiteName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields := Combine(
u.batchUSGstat(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
u.batchSysStats(s.SysStats, s.SystemStats),
map[string]interface{}{
"source": s.SourceName,
"ip": s.IP,
"bytes": s.Bytes.Val,
"last_seen": s.LastSeen.Val,
"license_state": s.LicenseState,
"guest-num_sta": s.GuestNumSta.Val,
"rx_bytes": s.RxBytes.Val,
"tx_bytes": s.TxBytes.Val,
"uptime": s.Uptime.Val,
"state": s.State.Val,
"user-num_sta": s.UserNumSta.Val,
"version": s.Version,
"num_desktop": s.NumDesktop.Val,
"num_handheld": s.NumHandheld.Val,
"num_mobile": s.NumMobile.Val,
},
)
r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
u.batchNetTable(r, tags, s.NetworkTable)
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
tags = map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields = Combine(
u.batchUSWstat(s.Stat.Sw),
map[string]interface{}{
"guest-num_sta": s.GuestNumSta.Val,
"ip": s.IP,
"bytes": s.Bytes.Val,
"last_seen": s.LastSeen.Val,
"rx_bytes": s.RxBytes.Val,
"tx_bytes": s.TxBytes.Val,
"uptime": s.Uptime.Val,
"state": s.State.Val,
})
r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
u.batchPortTable(r, tags, s.PortTable)
if s.Stat.Ap == nil {
return // we're done now. the following code processes UDM (non-pro) UAP data.
}
tags = map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields = u.processUAPstats(s.Stat.Ap)
fields["ip"] = s.IP
fields["bytes"] = s.Bytes.Val
fields["last_seen"] = s.LastSeen.Val
fields["rx_bytes"] = s.RxBytes.Val
fields["tx_bytes"] = s.TxBytes.Val
fields["uptime"] = s.Uptime.Val
fields["state"] = s.State
fields["user-num_sta"] = int(s.UserNumSta.Val)
fields["guest-num_sta"] = int(s.GuestNumSta.Val)
fields["num_sta"] = s.NumSta.Val
r.send(&metric{Table: "uap", Tags: tags, Fields: fields})
u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
u.processVAPTable(r, tags, *s.VapTable)
}

View File

@ -1,140 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchUSG generates Unifi Gateway datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUSG(r report, s *unifi.USG) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields := Combine(
u.batchSysStats(s.SysStats, s.SystemStats),
u.batchUSGstat(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
map[string]interface{}{
"ip": s.IP,
"bytes": s.Bytes.Val,
"last_seen": s.LastSeen.Val,
"license_state": s.LicenseState,
"guest-num_sta": s.GuestNumSta.Val,
"rx_bytes": s.RxBytes.Val,
"tx_bytes": s.TxBytes.Val,
"uptime": s.Uptime.Val,
"state": s.State.Val,
"user-num_sta": s.UserNumSta.Val,
"version": s.Version,
"num_desktop": s.NumDesktop.Val,
"num_handheld": s.NumHandheld.Val,
"num_mobile": s.NumMobile.Val,
},
)
r.send(&metric{Table: "usg", Tags: tags, Fields: fields})
u.batchNetTable(r, tags, s.NetworkTable)
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
}
func (u *InfluxUnifi) batchUSGstat(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]interface{} {
if gw == nil {
return map[string]interface{}{}
}
return map[string]interface{}{
"uplink_latency": ul.Latency.Val,
"uplink_speed": ul.Speed.Val,
"speedtest-status_latency": ss.Latency.Val,
"speedtest-status_runtime": ss.Runtime.Val,
"speedtest-status_ping": ss.StatusPing.Val,
"speedtest-status_xput_download": ss.XputDownload.Val,
"speedtest-status_xput_upload": ss.XputUpload.Val,
"lan-rx_bytes": gw.LanRxBytes.Val,
"lan-rx_packets": gw.LanRxPackets.Val,
"lan-tx_bytes": gw.LanTxBytes.Val,
"lan-tx_packets": gw.LanTxPackets.Val,
"lan-rx_dropped": gw.LanRxDropped.Val,
}
}
func (u *InfluxUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
for _, wan := range wans {
if !wan.Up.Val {
continue
}
tags := map[string]string{
"device_name": tags["name"],
"site_name": tags["site_name"],
"source": tags["source"],
"ip": wan.IP,
"purpose": wan.Name,
"mac": wan.Mac,
"ifname": wan.Ifname,
"type": wan.Type,
"up": wan.Up.Txt,
"enabled": wan.Enable.Txt,
}
fields := map[string]interface{}{
"bytes-r": wan.BytesR.Val,
"full_duplex": wan.FullDuplex.Val,
"gateway": wan.Gateway,
"max_speed": wan.MaxSpeed.Val,
"rx_bytes": wan.RxBytes.Val,
"rx_bytes-r": wan.RxBytesR.Val,
"rx_dropped": wan.RxDropped.Val,
"rx_errors": wan.RxErrors.Val,
"rx_broadcast": wan.RxBroadcast.Val,
"rx_multicast": wan.RxMulticast.Val,
"rx_packets": wan.RxPackets.Val,
"speed": wan.Speed.Val,
"tx_bytes": wan.TxBytes.Val,
"tx_bytes-r": wan.TxBytesR.Val,
"tx_dropped": wan.TxDropped.Val,
"tx_errors": wan.TxErrors.Val,
"tx_packets": wan.TxPackets.Val,
"tx_broadcast": wan.TxBroadcast.Val,
"tx_multicast": wan.TxMulticast.Val,
}
r.send(&metric{Table: "usg_wan_ports", Tags: tags, Fields: fields})
}
}
func (u *InfluxUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
for _, p := range nt {
tags := map[string]string{
"device_name": tags["name"],
"site_name": tags["site_name"],
"source": tags["source"],
"up": p.Up.Txt,
"enabled": p.Enabled.Txt,
"ip": p.IP,
"mac": p.Mac,
"name": p.Name,
"domain_name": p.DomainName,
"purpose": p.Purpose,
"is_guest": p.IsGuest.Txt,
}
fields := map[string]interface{}{
"num_sta": p.NumSta.Val,
"rx_bytes": p.RxBytes.Val,
"rx_packets": p.RxPackets.Val,
"tx_bytes": p.TxBytes.Val,
"tx_packets": p.TxPackets.Val,
}
r.send(&metric{Table: "usg_networks", Tags: tags, Fields: fields})
}
}

View File

@ -1,114 +0,0 @@
package influxunifi
import (
"golift.io/unifi"
)
// batchUSW generates Unifi Switch datapoints for InfluxDB.
// These points can be passed directly to influx.
func (u *InfluxUnifi) batchUSW(r report, s *unifi.USW) {
if !s.Adopted.Val || s.Locating.Val {
return
}
tags := map[string]string{
"mac": s.Mac,
"site_name": s.SiteName,
"source": s.SourceName,
"name": s.Name,
"version": s.Version,
"model": s.Model,
"serial": s.Serial,
"type": s.Type,
}
fields := Combine(
u.batchUSWstat(s.Stat.Sw),
u.batchSysStats(s.SysStats, s.SystemStats),
map[string]interface{}{
"guest-num_sta": s.GuestNumSta.Val,
"ip": s.IP,
"bytes": s.Bytes.Val,
"fan_level": s.FanLevel.Val,
"general_temperature": s.GeneralTemperature.Val,
"last_seen": s.LastSeen.Val,
"rx_bytes": s.RxBytes.Val,
"tx_bytes": s.TxBytes.Val,
"uptime": s.Uptime.Val,
"state": s.State.Val,
"user-num_sta": s.UserNumSta.Val,
})
r.send(&metric{Table: "usw", Tags: tags, Fields: fields})
u.batchPortTable(r, tags, s.PortTable)
}
func (u *InfluxUnifi) batchUSWstat(sw *unifi.Sw) map[string]interface{} {
if sw == nil {
return map[string]interface{}{}
}
return map[string]interface{}{
"stat_bytes": sw.Bytes.Val,
"stat_rx_bytes": sw.RxBytes.Val,
"stat_rx_crypts": sw.RxCrypts.Val,
"stat_rx_dropped": sw.RxDropped.Val,
"stat_rx_errors": sw.RxErrors.Val,
"stat_rx_frags": sw.RxFrags.Val,
"stat_rx_packets": sw.TxPackets.Val,
"stat_tx_bytes": sw.TxBytes.Val,
"stat_tx_dropped": sw.TxDropped.Val,
"stat_tx_errors": sw.TxErrors.Val,
"stat_tx_packets": sw.TxPackets.Val,
"stat_tx_retries": sw.TxRetries.Val,
}
}
func (u *InfluxUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
for _, p := range pt {
if !p.Up.Val || !p.Enable.Val {
continue // only record UP ports.
}
tags := map[string]string{
"site_name": t["site_name"],
"device_name": t["name"],
"source": t["source"],
"name": p.Name,
"poe_mode": p.PoeMode,
"port_poe": p.PortPoe.Txt,
"port_idx": p.PortIdx.Txt,
"port_id": t["name"] + " Port " + p.PortIdx.Txt,
"poe_enable": p.PoeEnable.Txt,
"flowctrl_rx": p.FlowctrlRx.Txt,
"flowctrl_tx": p.FlowctrlTx.Txt,
"media": p.Media,
}
fields := map[string]interface{}{
"dbytes_r": p.BytesR.Val,
"rx_broadcast": p.RxBroadcast.Val,
"rx_bytes": p.RxBytes.Val,
"rx_bytes-r": p.RxBytesR.Val,
"rx_dropped": p.RxDropped.Val,
"rx_errors": p.RxErrors.Val,
"rx_multicast": p.RxMulticast.Val,
"rx_packets": p.RxPackets.Val,
"speed": p.Speed.Val,
"stp_pathcost": p.StpPathcost.Val,
"tx_broadcast": p.TxBroadcast.Val,
"tx_bytes": p.TxBytes.Val,
"tx_bytes-r": p.TxBytesR.Val,
"tx_dropped": p.TxDropped.Val,
"tx_errors": p.TxErrors.Val,
"tx_multicast": p.TxMulticast.Val,
"tx_packets": p.TxPackets.Val,
}
if p.PoeEnable.Val && p.PortPoe.Val {
fields["poe_current"] = p.PoeCurrent.Val
fields["poe_power"] = p.PoePower.Val
fields["poe_voltage"] = p.PoeVoltage.Val
}
r.send(&metric{Table: "usw_ports", Tags: tags, Fields: fields})
}
}

View File

@ -1,9 +0,0 @@
// +build darwin
package poller
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = "/usr/local/etc/unifi-poller/up.conf"
// DefaultObjPath is the path to look for shared object libraries (plugins).
const DefaultObjPath = "/usr/local/lib/unifi-poller"

View File

@ -1,9 +0,0 @@
// +build !windows,!darwin
package poller
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = "/etc/unifi-poller/up.conf"
// DefaultObjPath is the path to look for shared object libraries (plugins).
const DefaultObjPath = "/usr/lib/unifi-poller"

View File

@ -1,9 +0,0 @@
// +build windows
package poller
// DefaultConfFile is where to find config if --config is not provided.
const DefaultConfFile = `C:\ProgramData\unifi-poller\up.conf`
// DefaultObjPath is useless in this context. Bummer.
const DefaultObjPath = "PLUGINS_DO_NOT_WORK_ON_WINDOWS_SOWWWWWY"

View File

@ -1,150 +0,0 @@
package poller
/*
I consider this file the pinnacle example of how to allow a Go application to be configured from a file.
You can put your configuration into any file format: XML, YAML, JSON, TOML, and you can override any
struct member using an environment variable. The Duration type is also supported. All of the Config{}
and Duration{} types and methods are reusable in other projects. Just adjust the data in the struct to
meet your app's needs. See the New() procedure and Start() method in start.go for example usage.
*/
import (
"os"
"path"
"plugin"
"strings"
"time"
"github.com/spf13/pflag"
"golift.io/cnfg"
"golift.io/cnfg/cnfgfile"
"golift.io/unifi"
)
const (
// AppName is the name of the application.
AppName = "unifi-poller"
// ENVConfigPrefix is the prefix appended to an env variable tag name.
ENVConfigPrefix = "UP"
)
// UnifiPoller contains the application startup data, and auth info for UniFi & Influx.
type UnifiPoller struct {
Flags *Flags
*Config
}
// Flags represents the CLI args available and their settings.
type Flags struct {
ConfigFile string
DumpJSON string
ShowVer bool
*pflag.FlagSet
}
// Metrics is a type shared by the exporting and reporting packages.
type Metrics struct {
TS time.Time
unifi.Sites
unifi.IDSList
unifi.Clients
*unifi.Devices
SitesDPI []*unifi.DPITable
ClientsDPI []*unifi.DPITable
}
// Config represents the core library input data.
type Config struct {
*Poller `json:"poller" toml:"poller" xml:"poller" yaml:"poller"`
}
// Poller is the global config values.
type Poller struct {
Plugins []string `json:"plugins" toml:"plugins" xml:"plugin" yaml:"plugins"`
Debug bool `json:"debug" toml:"debug" xml:"debug,attr" yaml:"debug"`
Quiet bool `json:"quiet,omitempty" toml:"quiet,omitempty" xml:"quiet,attr" yaml:"quiet"`
}
// LoadPlugins reads in dynamic shared libraries.
// Not used very often, if at all.
func (u *UnifiPoller) LoadPlugins() error {
for _, p := range u.Plugins {
name := strings.TrimSuffix(p, ".so") + ".so"
if name == ".so" {
continue // Just ignore it. uhg.
}
if _, err := os.Stat(name); os.IsNotExist(err) {
name = path.Join(DefaultObjPath, name)
}
u.Logf("Loading Dynamic Plugin: %s", name)
if _, err := plugin.Open(name); err != nil {
return err
}
}
return nil
}
// ParseConfigs parses the poller config and the config for each registered output plugin.
func (u *UnifiPoller) ParseConfigs() error {
// Parse core config.
if err := u.parseInterface(u.Config); err != nil {
return err
}
// Load dynamic plugins.
if err := u.LoadPlugins(); err != nil {
return err
}
if err := u.parseInputs(); err != nil {
return err
}
return u.parseOutputs()
}
// parseInterface parses the config file and environment variables into the provided interface.
func (u *UnifiPoller) parseInterface(i interface{}) error {
// Parse config file into provided interface.
if err := cnfgfile.Unmarshal(i, u.Flags.ConfigFile); err != nil {
return err
}
// Parse environment variables into provided interface.
_, err := cnfg.UnmarshalENV(i, ENVConfigPrefix)
return err
}
// Parse input plugin configs.
func (u *UnifiPoller) parseInputs() error {
inputSync.Lock()
defer inputSync.Unlock()
for _, i := range inputs {
if err := u.parseInterface(i.Config); err != nil {
return err
}
}
return nil
}
// Parse output plugin configs.
func (u *UnifiPoller) parseOutputs() error {
outputSync.Lock()
defer outputSync.Unlock()
for _, o := range outputs {
if err := u.parseInterface(o.Config); err != nil {
return err
}
}
return nil
}
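The long comment at the top of this deleted config.go describes the approach in prose; as a rough, non-authoritative sketch of that idea, the snippet below applies the same two-step parse used by parseInterface() above to a stand-alone struct. The exampleconf package name, the Config fields, and the file path argument are illustrative assumptions, not code from this repo; only the cnfgfile.Unmarshal and cnfg.UnmarshalENV calls mirror the real ones shown above.
package exampleconf
import (
    "golift.io/cnfg"
    "golift.io/cnfg/cnfgfile"
)
// Config is a hypothetical plugin configuration. The struct tags follow the
// same file-format conventions used by the Config types in this repo.
type Config struct {
    URL  string `json:"url" toml:"url" xml:"url" yaml:"url"`
    Pass string `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
}
// parse mirrors UnifiPoller.parseInterface: config file first, then
// environment-variable overrides derived from the "UP" prefix and struct tags.
func parse(c *Config, configFile string) error {
    // Parse the config file (TOML, JSON, XML, or YAML) into the struct.
    if err := cnfgfile.Unmarshal(c, configFile); err != nil {
        return err
    }
    // Override any member from environment variables.
    _, err := cnfg.UnmarshalENV(c, "UP")
    return err
}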

View File

@ -1,33 +0,0 @@
package poller
import (
"fmt"
"strconv"
"strings"
)
// DumpJSONPayload prints raw json from the UniFi Controller. This is currently
// tied into the -j CLI arg, and is probably not very useful outside that context.
func (u *UnifiPoller) DumpJSONPayload() (err error) {
u.Config.Quiet = true
split := strings.SplitN(u.Flags.DumpJSON, " ", 2)
filter := &Filter{Kind: split[0]}
if split2 := strings.Split(filter.Kind, ":"); len(split2) > 1 {
filter.Kind = split2[0]
filter.Unit, _ = strconv.Atoi(split2[1])
}
if len(split) > 1 {
filter.Path = split[1]
}
m, err := inputs[0].RawMetrics(filter)
if err != nil {
return err
}
fmt.Println(string(m))
return nil
}

View File

@ -1,165 +0,0 @@
package poller
import (
"fmt"
"strings"
"sync"
"golift.io/unifi"
)
var (
inputs []*InputPlugin
inputSync sync.Mutex
)
// Input plugins must implement this interface.
type Input interface {
Initialize(Logger) error // Called once on startup to initialize the plugin.
Metrics() (*Metrics, bool, error) // Called every time new metrics are requested.
MetricsFrom(*Filter) (*Metrics, bool, error) // Called every time new metrics are requested.
RawMetrics(*Filter) ([]byte, error)
}
// InputPlugin describes an input plugin's consumable interface.
type InputPlugin struct {
Name string
Config interface{} // Each config is passed into an unmarshaller later.
Input
}
// Filter is used for metrics filters. Many fields for lots of expansion.
type Filter struct {
Type string
Term string
Name string
Tags string
Role string
Kind string
Path string
Area int
Item int
Unit int
Sign int64
Mass int64
Rate float64
Cost float64
Free bool
True bool
Done bool
Stop bool
}
// NewInput creates a metric input. This should be called by input plugins'
// init() functions.
func NewInput(i *InputPlugin) {
inputSync.Lock()
defer inputSync.Unlock()
if i == nil || i.Input == nil {
panic("nil output or method passed to poller.NewOutput")
}
inputs = append(inputs, i)
}
// InitializeInputs runs the passed-in initializer method for each input plugin.
func (u *UnifiPoller) InitializeInputs() error {
inputSync.Lock()
defer inputSync.Unlock()
for _, input := range inputs {
// This must return, or the app locks up here.
if err := input.Initialize(u); err != nil {
return err
}
}
return nil
}
// Metrics aggregates all the measurements from all configured inputs and returns them.
func (u *UnifiPoller) Metrics() (*Metrics, bool, error) {
errs := []string{}
metrics := &Metrics{}
ok := false
for _, input := range inputs {
m, _, err := input.Metrics()
if err != nil {
errs = append(errs, err.Error())
}
if m == nil {
continue
}
ok = true
metrics = AppendMetrics(metrics, m)
}
var err error
if len(errs) > 0 {
err = fmt.Errorf(strings.Join(errs, ", "))
}
return metrics, ok, err
}
// MetricsFrom aggregates all the measurements from filtered inputs and returns them.
func (u *UnifiPoller) MetricsFrom(filter *Filter) (*Metrics, bool, error) {
errs := []string{}
metrics := &Metrics{}
ok := false
for _, input := range inputs {
if !strings.EqualFold(input.Name, filter.Name) {
continue
}
m, _, err := input.MetricsFrom(filter)
if err != nil {
errs = append(errs, err.Error())
}
if m == nil {
continue
}
ok = true
metrics = AppendMetrics(metrics, m)
}
var err error
if len(errs) > 0 {
err = fmt.Errorf(strings.Join(errs, ", "))
}
return metrics, ok, err
}
// AppendMetrics combines the metrics from two sources.
func AppendMetrics(existing *Metrics, m *Metrics) *Metrics {
existing.SitesDPI = append(existing.SitesDPI, m.SitesDPI...)
existing.Sites = append(existing.Sites, m.Sites...)
existing.ClientsDPI = append(existing.ClientsDPI, m.ClientsDPI...)
existing.Clients = append(existing.Clients, m.Clients...)
existing.IDSList = append(existing.IDSList, m.IDSList...)
if m.Devices == nil {
return existing
}
if existing.Devices == nil {
existing.Devices = &unifi.Devices{}
}
existing.UAPs = append(existing.UAPs, m.UAPs...)
existing.USGs = append(existing.USGs, m.USGs...)
existing.USWs = append(existing.USWs, m.USWs...)
existing.UDMs = append(existing.UDMs, m.UDMs...)
return existing
}
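To make the registration contract above concrete, here is a minimal, hypothetical input plugin. The exampleinput package, its canned data, and the import path (the github.com/unifi-poller/poller path this commit migrates to) are assumptions for illustration only; it satisfies the Input interface and registers itself from init(), the same pattern pkg/inputunifi uses.
package exampleinput
import (
    "time"
    "github.com/unifi-poller/poller"
)
type exampleInput struct {
    logger poller.Logger
}
func init() {
    // Register with the poller core; Config is unmarshalled later by ParseConfigs().
    poller.NewInput(&poller.InputPlugin{
        Name:   "example",
        Config: &struct{}{}, // hypothetical: no options.
        Input:  &exampleInput{},
    })
}
// Initialize is called once on startup.
func (e *exampleInput) Initialize(l poller.Logger) error {
    e.logger = l
    l.Logf("example input initialized")
    return nil
}
// Metrics returns canned (empty) metrics every poll cycle.
func (e *exampleInput) Metrics() (*poller.Metrics, bool, error) {
    return &poller.Metrics{TS: time.Now()}, true, nil
}
// MetricsFrom ignores the filter in this sketch.
func (e *exampleInput) MetricsFrom(_ *poller.Filter) (*poller.Metrics, bool, error) {
    return e.Metrics()
}
// RawMetrics returns nothing useful here.
func (e *exampleInput) RawMetrics(_ *poller.Filter) ([]byte, error) {
    return []byte("{}"), nil
}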

View File

@ -1,34 +0,0 @@
package poller
import (
"fmt"
"log"
)
const callDepth = 2
// Logger is passed into input packages so they may write logs.
type Logger interface {
Logf(m string, v ...interface{})
LogErrorf(m string, v ...interface{})
LogDebugf(m string, v ...interface{})
}
// Logf prints a log entry if quiet is false.
func (u *UnifiPoller) Logf(m string, v ...interface{}) {
if !u.Quiet {
_ = log.Output(callDepth, fmt.Sprintf("[INFO] "+m, v...))
}
}
// LogDebugf prints a debug log entry if debug is true and quiet is false.
func (u *UnifiPoller) LogDebugf(m string, v ...interface{}) {
if u.Debug && !u.Quiet {
_ = log.Output(callDepth, fmt.Sprintf("[DEBUG] "+m, v...))
}
}
// LogErrorf prints an error log entry.
func (u *UnifiPoller) LogErrorf(m string, v ...interface{}) {
_ = log.Output(callDepth, fmt.Sprintf("[ERROR] "+m, v...))
}

View File

@ -1,72 +0,0 @@
package poller
import (
"fmt"
"sync"
)
var (
outputs []*Output
outputSync sync.Mutex
)
// Collect is passed into output packages so they may collect metrics to output.
// The poller core implements this interface and passes it to each output plugin.
type Collect interface {
Metrics() (*Metrics, bool, error)
MetricsFrom(*Filter) (*Metrics, bool, error)
Logger
}
// Output defines the output data for a metric exporter like influx or prometheus.
// Output packages should call NewOutput with this struct in init().
type Output struct {
Name string
Config interface{} // Each config is passed into an unmarshaller later.
Method func(Collect) error // Called on startup for each configured output.
}
// NewOutput should be called by each output package's init function.
func NewOutput(o *Output) {
outputSync.Lock()
defer outputSync.Unlock()
if o == nil || o.Method == nil {
panic("nil output or method passed to poller.NewOutput")
}
outputs = append(outputs, o)
}
// InitializeOutputs runs all the configured output plugins.
// If none exist, or they all exit, an error is returned.
func (u *UnifiPoller) InitializeOutputs() error {
v := make(chan error)
defer close(v)
var count int
for _, o := range outputs {
count++
go func(o *Output) {
v <- o.Method(u)
}(o)
}
if count < 1 {
return fmt.Errorf("no output plugins imported")
}
for err := range v {
if err != nil {
return err
}
if count--; count < 1 {
return fmt.Errorf("all output plugins have stopped, or none enabled")
}
}
return nil
}
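For symmetry with the inputs example, a minimal output plugin sketch follows. Everything here (the exampleoutput package, the 30-second interval, the import path) is a hypothetical illustration, not part of this commit; it simply registers a Method via NewOutput in init() and blocks inside that Method, since InitializeOutputs() reports an error once every output's Method has returned.
package exampleoutput
import (
    "time"
    "github.com/unifi-poller/poller"
)
func init() {
    poller.NewOutput(&poller.Output{
        Name:   "example",
        Config: &struct{}{}, // hypothetical: no options.
        Method: run,
    })
}
// run is called once on startup. It must block; if it returns, the poller
// core counts this output as stopped.
func run(c poller.Collect) error {
    for range time.Tick(30 * time.Second) {
        metrics, _, err := c.Metrics()
        if err != nil {
            c.LogErrorf("%v", err)
            continue
        }
        c.Logf("collected %d clients", len(metrics.Clients))
    }
    return nil
}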

View File

@ -1,83 +0,0 @@
// Package poller provides the CLI interface to set up unifi-poller.
package poller
import (
"fmt"
"log"
"os"
"github.com/prometheus/common/version"
"github.com/spf13/pflag"
)
// New returns a new poller struct.
func New() *UnifiPoller {
return &UnifiPoller{Config: &Config{Poller: &Poller{}}, Flags: &Flags{}}
}
// Start begins the application from a CLI.
// Parses cli flags, parses config file, parses env vars, sets up logging, then:
// - dumps a json payload OR - executes Run().
func (u *UnifiPoller) Start() error {
log.SetOutput(os.Stdout)
log.SetFlags(log.LstdFlags)
u.Flags.Parse(os.Args[1:])
if u.Flags.ShowVer {
fmt.Printf("%s v%s\n", AppName, version.Version)
return nil // don't run anything else w/ version request.
}
if u.Flags.DumpJSON == "" { // do not print this when dumping JSON.
u.Logf("Loading Configuration File: %s", u.Flags.ConfigFile)
}
// Parse config file and ENV variables.
if err := u.ParseConfigs(); err != nil {
return err
}
return u.Run()
}
// Parse turns CLI arguments into data structures. Called by Start() on startup.
func (f *Flags) Parse(args []string) {
f.FlagSet = pflag.NewFlagSet(AppName, pflag.ExitOnError)
f.Usage = func() {
fmt.Printf("Usage: %s [--config=/path/to/up.conf] [--version]", AppName)
f.PrintDefaults()
}
f.StringVarP(&f.DumpJSON, "dumpjson", "j", "",
"This debug option prints a json payload and exits. See man page for more info.")
f.StringVarP(&f.ConfigFile, "config", "c", DefaultConfFile, "Poller config file path.")
f.BoolVarP(&f.ShowVer, "version", "v", false, "Print the version and exit.")
_ = f.FlagSet.Parse(args) // pflag.ExitOnError means this will never return error.
}
// Run picks a mode and executes the associated functions. This will do one of three things:
// 1. Start the collector routine that polls unifi and reports to influx on an interval. (default)
// 2. Run the collector one time and report the metrics to influxdb. (lambda)
// 3. Start a web server and wait for Prometheus to poll the application for metrics.
func (u *UnifiPoller) Run() error {
if u.Flags.DumpJSON != "" {
if err := u.InitializeInputs(); err != nil {
return err
}
return u.DumpJSONPayload()
}
if u.Debug {
log.SetFlags(log.Lshortfile | log.Lmicroseconds | log.Ldate)
u.LogDebugf("Debug Logging Enabled")
}
log.Printf("[INFO] UniFi Poller v%v Starting Up! PID: %d", version.Version, os.Getpid())
if err := u.InitializeInputs(); err != nil {
return err
}
return u.InitializeOutputs()
}

View File

@ -1,4 +0,0 @@
# prometheus
This package provides the interface to turn UniFi measurements into prometheus
exported metrics. Requires the poller package for actual UniFi data collection.

View File

@ -1,136 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
type uclient struct {
Anomalies *prometheus.Desc
BytesR *prometheus.Desc
CCQ *prometheus.Desc
Satisfaction *prometheus.Desc
Noise *prometheus.Desc
RoamCount *prometheus.Desc
RSSI *prometheus.Desc
RxBytes *prometheus.Desc
RxBytesR *prometheus.Desc
RxPackets *prometheus.Desc
RxRate *prometheus.Desc
Signal *prometheus.Desc
TxBytes *prometheus.Desc
TxBytesR *prometheus.Desc
TxPackets *prometheus.Desc
TxRetries *prometheus.Desc
TxPower *prometheus.Desc
TxRate *prometheus.Desc
Uptime *prometheus.Desc
WifiTxAttempts *prometheus.Desc
WiredRxBytes *prometheus.Desc
WiredRxBytesR *prometheus.Desc
WiredRxPackets *prometheus.Desc
WiredTxBytes *prometheus.Desc
WiredTxBytesR *prometheus.Desc
WiredTxPackets *prometheus.Desc
DPITxPackets *prometheus.Desc
DPIRxPackets *prometheus.Desc
DPITxBytes *prometheus.Desc
DPIRxBytes *prometheus.Desc
}
func descClient(ns string) *uclient {
labels := []string{"name", "mac", "site_name", "gw_name", "sw_name", "vlan",
"ip", "oui", "network", "sw_port", "ap_name", "source", "wired"}
labelW := append([]string{"radio_name", "radio", "radio_proto", "channel", "essid", "bssid", "radio_desc"}, labels...)
labelDPI := []string{"name", "mac", "site_name", "source", "category", "application"}
return &uclient{
Anomalies: prometheus.NewDesc(ns+"anomalies", "Client Anomalies", labelW, nil),
BytesR: prometheus.NewDesc(ns+"transfer_rate_bytes", "Client Data Rate", labelW, nil),
CCQ: prometheus.NewDesc(ns+"ccq_ratio", "Client Connection Quality", labelW, nil),
Satisfaction: prometheus.NewDesc(ns+"satisfaction_ratio", "Client Satisfaction", labelW, nil),
Noise: prometheus.NewDesc(ns+"noise_db", "Client AP Noise", labelW, nil),
RoamCount: prometheus.NewDesc(ns+"roam_count_total", "Client Roam Counter", labelW, nil),
RSSI: prometheus.NewDesc(ns+"rssi_db", "Client RSSI", labelW, nil),
RxBytes: prometheus.NewDesc(ns+"receive_bytes_total", "Client Receive Bytes", labels, nil),
RxBytesR: prometheus.NewDesc(ns+"receive_rate_bytes", "Client Receive Data Rate", labels, nil),
RxPackets: prometheus.NewDesc(ns+"receive_packets_total", "Client Receive Packets", labels, nil),
RxRate: prometheus.NewDesc(ns+"radio_receive_rate_bps", "Client Receive Rate", labelW, nil),
Signal: prometheus.NewDesc(ns+"radio_signal_db", "Client Signal Strength", labelW, nil),
TxBytes: prometheus.NewDesc(ns+"transmit_bytes_total", "Client Transmit Bytes", labels, nil),
TxBytesR: prometheus.NewDesc(ns+"transmit_rate_bytes", "Client Transmit Data Rate", labels, nil),
TxPackets: prometheus.NewDesc(ns+"transmit_packets_total", "Client Transmit Packets", labels, nil),
TxRetries: prometheus.NewDesc(ns+"transmit_retries_total", "Client Transmit Retries", labels, nil),
TxPower: prometheus.NewDesc(ns+"radio_transmit_power_dbm", "Client Transmit Power", labelW, nil),
TxRate: prometheus.NewDesc(ns+"radio_transmit_rate_bps", "Client Transmit Rate", labelW, nil),
WifiTxAttempts: prometheus.NewDesc(ns+"wifi_attempts_transmit_total", "Client Wifi Transmit Attempts", labelW, nil),
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Client Uptime", labelW, nil),
DPITxPackets: prometheus.NewDesc(ns+"dpi_transmit_packets", "Client DPI Transmit Packets", labelDPI, nil),
DPIRxPackets: prometheus.NewDesc(ns+"dpi_receive_packets", "Client DPI Receive Packets", labelDPI, nil),
DPITxBytes: prometheus.NewDesc(ns+"dpi_transmit_bytes", "Client DPI Transmit Bytes", labelDPI, nil),
DPIRxBytes: prometheus.NewDesc(ns+"dpi_receive_bytes", "Client DPI Receive Bytes", labelDPI, nil),
}
}
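// exportClientDPI exports per-application, per-category DPI counters for a single client.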
func (u *promUnifi) exportClientDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
labelDPI := []string{s.Name, s.MAC, s.SiteName, s.SourceName,
unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App)}
// log.Println(labelDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
r.send([]*metric{
{u.Client.DPITxPackets, gauge, dpi.TxPackets, labelDPI},
{u.Client.DPIRxPackets, gauge, dpi.RxPackets, labelDPI},
{u.Client.DPITxBytes, gauge, dpi.TxBytes, labelDPI},
{u.Client.DPIRxBytes, gauge, dpi.RxBytes, labelDPI},
})
}
}
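// exportClient exports one client's metrics, using the wired byte counters for wired clients and the wireless set otherwise.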
func (u *promUnifi) exportClient(r report, c *unifi.Client) {
labels := []string{c.Name, c.Mac, c.SiteName, c.GwName, c.SwName, c.Vlan.Txt,
c.IP, c.Oui, c.Network, c.SwPort.Txt, c.ApName, c.SourceName, ""}
labelW := append([]string{c.RadioName, c.Radio, c.RadioProto, c.Channel.Txt,
c.Essid, c.Bssid, c.RadioDescription}, labels...)
if c.IsWired.Val {
labels[len(labels)-1] = "true"
labelW[len(labelW)-1] = "true"
r.send([]*metric{
{u.Client.RxBytes, counter, c.WiredRxBytes, labels},
{u.Client.RxBytesR, gauge, c.WiredRxBytesR, labels},
{u.Client.RxPackets, counter, c.WiredRxPackets, labels},
{u.Client.TxBytes, counter, c.WiredTxBytes, labels},
{u.Client.TxBytesR, gauge, c.WiredTxBytesR, labels},
{u.Client.TxPackets, counter, c.WiredTxPackets, labels},
})
} else {
labels[len(labels)-1] = "false"
labelW[len(labelW)-1] = "false"
r.send([]*metric{
{u.Client.Anomalies, counter, c.Anomalies, labelW},
{u.Client.CCQ, gauge, float64(c.Ccq) / 1000.0, labelW},
{u.Client.Satisfaction, gauge, c.Satisfaction.Val / 100.0, labelW},
{u.Client.Noise, gauge, c.Noise, labelW},
{u.Client.RoamCount, counter, c.RoamCount, labelW},
{u.Client.RSSI, gauge, c.Rssi, labelW},
{u.Client.Signal, gauge, c.Signal, labelW},
{u.Client.TxPower, gauge, c.TxPower, labelW},
{u.Client.TxRate, gauge, c.TxRate * 1000, labelW},
{u.Client.WifiTxAttempts, counter, c.WifiTxAttempts, labelW},
{u.Client.RxRate, gauge, c.RxRate * 1000, labelW},
{u.Client.TxRetries, counter, c.TxRetries, labels},
{u.Client.TxBytes, counter, c.TxBytes, labels},
{u.Client.TxBytesR, gauge, c.TxBytesR, labels},
{u.Client.TxPackets, counter, c.TxPackets, labels},
{u.Client.RxBytes, counter, c.RxBytes, labels},
{u.Client.RxBytesR, gauge, c.RxBytesR, labels},
{u.Client.RxPackets, counter, c.RxPackets, labels},
{u.Client.BytesR, gauge, c.BytesR, labelW},
})
}
r.send([]*metric{{u.Client.Uptime, gauge, c.Uptime, labelW}})
}

View File

@ -1,346 +0,0 @@
// Package promunifi provides the bridge between unifi-poller metrics and prometheus.
package promunifi
import (
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
"golift.io/unifi"
)
const (
// channel buffer, fits at least one batch.
defaultBuffer = 50
defaultHTTPListen = "0.0.0.0:9130"
// simply fewer letters.
counter = prometheus.CounterValue
gauge = prometheus.GaugeValue
)
type promUnifi struct {
*Config `json:"prometheus" toml:"prometheus" xml:"prometheus" yaml:"prometheus"`
Client *uclient
Device *unifiDevice
UAP *uap
USG *usg
USW *usw
Site *site
// This interface is passed to the Collect() method. The Collect method uses
// this interface to retrieve the latest UniFi measurements and export them.
Collector poller.Collect
}
// Config is the input (config file) data used to initialize this output plugin.
type Config struct {
// If non-empty, each of the collected metrics is prefixed by the
// provided string and an underscore ("_").
Namespace string `json:"namespace" toml:"namespace" xml:"namespace" yaml:"namespace"`
HTTPListen string `json:"http_listen" toml:"http_listen" xml:"http_listen" yaml:"http_listen"`
// If true, any error encountered during collection is reported as an
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
// and the collected metrics will be incomplete. Possibly, no metrics
// will be collected at all.
ReportErrors bool `json:"report_errors" toml:"report_errors" xml:"report_errors" yaml:"report_errors"`
Disable bool `json:"disable" toml:"disable" xml:"disable" yaml:"disable"`
// Buffer is the size of the metric channel buffer.
// The default is 50 (defaultBuffer); a smaller buffer (even 1) may lower CPU usage, but that is untested.
Buffer int `json:"buffer" toml:"buffer" xml:"buffer" yaml:"buffer"`
}
type metric struct {
Desc *prometheus.Desc
ValueType prometheus.ValueType
Value interface{}
Labels []string
}
// Report accumulates counters that are printed to a log line.
type Report struct {
*Config
Total int // Total count of metrics recorded.
Errors int // Total count of errors recording metrics.
Zeros int // Total count of metrics equal to zero.
Metrics *poller.Metrics // Metrics collected and recorded.
Elapsed time.Duration // Duration elapsed collecting and exporting.
Fetch time.Duration // Duration elapsed making controller requests.
Start time.Time // Time collection began.
ch chan []*metric
wg sync.WaitGroup
}
// target is used for targeted (sometimes dynamic) metrics scrapes.
type target struct {
*poller.Filter
u *promUnifi
}
func init() {
u := &promUnifi{Config: &Config{}}
poller.NewOutput(&poller.Output{
Name: "prometheus",
Config: u,
Method: u.Run,
})
}
// Run creates the collectors and starts the web server.
// It should be run in a goroutine and returns nil immediately if this output is disabled.
func (u *promUnifi) Run(c poller.Collect) error {
if u.Disable {
return nil
}
u.Namespace = strings.Trim(strings.Replace(u.Namespace, "-", "_", -1), "_")
if u.Namespace == "" {
u.Namespace = strings.Replace(poller.AppName, "-", "", -1)
}
if u.HTTPListen == "" {
u.HTTPListen = defaultHTTPListen
}
if u.Buffer == 0 {
u.Buffer = defaultBuffer
}
// Later, this could be passed in from poller by adding a method to the interface.
u.Collector = c
u.Client = descClient(u.Namespace + "_client_")
u.Device = descDevice(u.Namespace + "_device_") // stats for all device types.
u.UAP = descUAP(u.Namespace + "_device_")
u.USG = descUSG(u.Namespace + "_device_")
u.USW = descUSW(u.Namespace + "_device_")
u.Site = descSite(u.Namespace + "_site_")
mux := http.NewServeMux()
prometheus.MustRegister(version.NewCollector(u.Namespace))
prometheus.MustRegister(u)
c.Logf("Prometheus exported at https://%s/ - namespace: %s", u.HTTPListen, u.Namespace)
mux.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer,
promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
))
mux.HandleFunc("/scrape", u.ScrapeHandler)
mux.HandleFunc("/", u.DefaultHandler)
return http.ListenAndServe(u.HTTPListen, mux)
}
// ScrapeHandler allows prometheus to scrape a single source, instead of all sources.
func (u *promUnifi) ScrapeHandler(w http.ResponseWriter, r *http.Request) {
t := &target{u: u, Filter: &poller.Filter{
Name: r.URL.Query().Get("input"), // "unifi"
Path: r.URL.Query().Get("path"), // url: "https://127.0.0.1:8443"
Role: r.URL.Query().Get("role"), // configured role in up.conf.
}}
if t.Name == "" {
u.Collector.LogErrorf("input parameter missing on scrape from %v", r.RemoteAddr)
http.Error(w, `'input' parameter must be specified (try "unifi")`, 400)
return
}
if t.Role == "" && t.Path == "" {
u.Collector.LogErrorf("role and path parameters missing on scrape from %v", r.RemoteAddr)
http.Error(w, "'role' OR 'path' parameter must be specified: configured role OR unconfigured url", 400)
return
}
registry := prometheus.NewRegistry()
registry.MustRegister(t)
promhttp.HandlerFor(
registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError},
).ServeHTTP(w, r)
}
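// DefaultHandler replies to any unmatched request with the application name.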
func (u *promUnifi) DefaultHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
_, _ = w.Write([]byte(poller.AppName + "\n"))
}
// Describe satisfies the prometheus Collector. This returns all of the
// metric descriptions that this package produces.
func (t *target) Describe(ch chan<- *prometheus.Desc) {
t.u.Describe(ch)
}
// Describe satisfies the prometheus Collector. This returns all of the
// metric descriptions that this package produces.
func (u *promUnifi) Describe(ch chan<- *prometheus.Desc) {
for _, f := range []interface{}{u.Client, u.Device, u.UAP, u.USG, u.USW, u.Site} {
v := reflect.Indirect(reflect.ValueOf(f))
// Loop each struct member and send it to the provided channel.
for i := 0; i < v.NumField(); i++ {
desc, ok := v.Field(i).Interface().(*prometheus.Desc)
if ok && desc != nil {
ch <- desc
}
}
}
}
// Collect satisfies the prometheus Collector. This runs for a single controller poll.
func (t *target) Collect(ch chan<- prometheus.Metric) {
t.u.collect(ch, t.Filter)
}
// Collect satisfies the prometheus Collector. This runs the input method to get
// the current metrics (from another package) then exports them for prometheus.
func (u *promUnifi) Collect(ch chan<- prometheus.Metric) {
u.collect(ch, nil)
}
func (u *promUnifi) collect(ch chan<- prometheus.Metric, filter *poller.Filter) {
var err error
r := &Report{
Config: u.Config,
ch: make(chan []*metric, u.Config.Buffer),
Start: time.Now()}
defer r.close()
ok := false
if filter == nil {
r.Metrics, ok, err = u.Collector.Metrics()
} else {
r.Metrics, ok, err = u.Collector.MetricsFrom(filter)
}
r.Fetch = time.Since(r.Start)
if err != nil {
r.error(ch, prometheus.NewInvalidDesc(err), fmt.Errorf("metric fetch failed"))
u.Collector.LogErrorf("metric fetch failed: %v", err)
if !ok {
return
}
}
if r.Metrics.Devices == nil {
r.Metrics.Devices = &unifi.Devices{}
}
// Pass Report interface into our collecting and reporting methods.
go u.exportMetrics(r, ch, r.ch)
u.loopExports(r)
}
// This is closely tied to the method above with a sync.WaitGroup.
// This method runs in a goroutine and exits when the channel closes.
// This is where our channel connects to the prometheus channel.
func (u *promUnifi) exportMetrics(r report, ch chan<- prometheus.Metric, ourChan chan []*metric) {
descs := make(map[*prometheus.Desc]bool) // used as a counter
defer r.report(u.Collector, descs)
for newMetrics := range ourChan {
for _, m := range newMetrics {
descs[m.Desc] = true
switch v := m.Value.(type) {
case unifi.FlexInt:
ch <- r.export(m, v.Val)
case float64:
ch <- r.export(m, v)
case int64:
ch <- r.export(m, float64(v))
case int:
ch <- r.export(m, float64(v))
default:
r.error(ch, m.Desc, fmt.Sprintf("not a number: %v", m.Value))
}
}
r.done()
}
}
func (u *promUnifi) loopExports(r report) {
m := r.metrics()
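// Eight add() calls below, one per export goroutine; each goroutine calls done() when it finishes.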
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
r.add()
go func() {
defer r.done()
for _, s := range m.Sites {
u.exportSite(r, s)
}
}()
go func() {
defer r.done()
for _, s := range m.SitesDPI {
u.exportSiteDPI(r, s)
}
}()
go func() {
defer r.done()
for _, c := range m.Clients {
u.exportClient(r, c)
}
}()
go func() {
defer r.done()
for _, c := range m.ClientsDPI {
u.exportClientDPI(r, c)
}
}()
go func() {
defer r.done()
for _, d := range m.UAPs {
u.exportUAP(r, d)
}
}()
go func() {
defer r.done()
for _, d := range m.UDMs {
u.exportUDM(r, d)
}
}()
go func() {
defer r.done()
for _, d := range m.USGs {
u.exportUSG(r, d)
}
}()
go func() {
defer r.done()
for _, d := range m.USWs {
u.exportUSW(r, d)
}
}()
}
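
As a quick illustration of the targeted /scrape handler above, here is a minimal
sketch of a Go client requesting metrics for a single source. It assumes the
exporter is running on the same host with the default listen address
(defaultHTTPListen) and that a controller role named "home" exists in up.conf;
both are assumptions, not part of the original code.
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// "input" is required; either "role" (a role configured in up.conf) or
	// "path" (an unconfigured controller URL) must also be provided.
	q := url.Values{}
	q.Set("input", "unifi")
	q.Set("role", "home") // hypothetical role name.

	resp, err := http.Get("http://127.0.0.1:9130/scrape?" + q.Encode())
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // raw prometheus exposition text.
}
```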

View File

@ -1,80 +0,0 @@
package promunifi
import (
"fmt"
"time"
"github.com/davidnewhall/unifi-poller/pkg/poller"
"github.com/prometheus/client_golang/prometheus"
)
// This file contains the report interface.
// This interface can be mocked and overridden for tests.
// report is an internal interface used to "process metrics"
type report interface {
add()
done()
send([]*metric)
metrics() *poller.Metrics
report(c poller.Collect, descs map[*prometheus.Desc]bool)
export(m *metric, v float64) prometheus.Metric
error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{})
}
// satisfy gomnd
const one = 1
const oneDecimalPoint = 10.0
func (r *Report) add() {
r.wg.Add(one)
}
func (r *Report) done() {
r.wg.Add(-one)
}
func (r *Report) send(m []*metric) {
r.wg.Add(one)
r.ch <- m
}
func (r *Report) metrics() *poller.Metrics {
return r.Metrics
}
func (r *Report) report(c poller.Collect, descs map[*prometheus.Desc]bool) {
m := r.Metrics
c.Logf("UniFi Measurements Exported. Site: %d, Client: %d, "+
"UAP: %d, USG/UDM: %d, USW: %d, Descs: %d, "+
"Metrics: %d, Errs: %d, 0s: %d, Reqs/Total: %v / %v",
len(m.Sites), len(m.Clients), len(m.UAPs), len(m.UDMs)+len(m.USGs), len(m.USWs),
len(descs), r.Total, r.Errors, r.Zeros,
r.Fetch.Round(time.Millisecond/oneDecimalPoint),
r.Elapsed.Round(time.Millisecond/oneDecimalPoint))
}
func (r *Report) export(m *metric, v float64) prometheus.Metric {
r.Total++
if v == 0 {
r.Zeros++
}
return prometheus.MustNewConstMetric(m.Desc, m.ValueType, v, m.Labels...)
}
func (r *Report) error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{}) {
r.Errors++
if r.ReportErrors {
ch <- prometheus.NewInvalidMetric(d, fmt.Errorf("error: %v", v))
}
}
// close is not part of the interface.
func (r *Report) close() {
r.wg.Wait()
r.Elapsed = time.Since(r.Start)
close(r.ch)
}
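
The comment at the top of this file notes that the report interface can be
mocked for tests. Here is a minimal sketch of such a test double, assuming it
lives in the promunifi package so the metric, poller, and prometheus types are
in scope; the fakeReport name and behavior are illustrative, not part of the
original code.
```
// fakeReport is a hypothetical test double that records sent metrics in
// memory instead of handing them to prometheus.
type fakeReport struct {
	sent []*metric
	m    *poller.Metrics
}

func (f *fakeReport) add()                     {}
func (f *fakeReport) done()                    {}
func (f *fakeReport) send(m []*metric)         { f.sent = append(f.sent, m...) }
func (f *fakeReport) metrics() *poller.Metrics { return f.m }
func (f *fakeReport) report(c poller.Collect, descs map[*prometheus.Desc]bool) {}
func (f *fakeReport) export(m *metric, v float64) prometheus.Metric {
	// Mirror the real Report so exported values stay comparable in tests.
	return prometheus.MustNewConstMetric(m.Desc, m.ValueType, v, m.Labels...)
}
func (f *fakeReport) error(ch chan<- prometheus.Metric, d *prometheus.Desc, v interface{}) {}
```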

View File

@ -1,152 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
type site struct {
NumUser *prometheus.Desc
NumGuest *prometheus.Desc
NumIot *prometheus.Desc
TxBytesR *prometheus.Desc
RxBytesR *prometheus.Desc
NumAp *prometheus.Desc
NumAdopted *prometheus.Desc
NumDisabled *prometheus.Desc
NumDisconnected *prometheus.Desc
NumPending *prometheus.Desc
NumGw *prometheus.Desc
NumSw *prometheus.Desc
NumSta *prometheus.Desc
Latency *prometheus.Desc
Drops *prometheus.Desc
Uptime *prometheus.Desc
XputUp *prometheus.Desc
XputDown *prometheus.Desc
SpeedtestPing *prometheus.Desc
RemoteUserNumActive *prometheus.Desc
RemoteUserNumInactive *prometheus.Desc
RemoteUserRxBytes *prometheus.Desc
RemoteUserTxBytes *prometheus.Desc
RemoteUserRxPackets *prometheus.Desc
RemoteUserTxPackets *prometheus.Desc
DPITxPackets *prometheus.Desc
DPIRxPackets *prometheus.Desc
DPITxBytes *prometheus.Desc
DPIRxBytes *prometheus.Desc
}
func descSite(ns string) *site {
labels := []string{"subsystem", "status", "site_name", "source"}
labelDPI := []string{"category", "application", "site_name", "source"}
nd := prometheus.NewDesc
return &site{
NumUser: nd(ns+"users", "Number of Users", labels, nil),
NumGuest: nd(ns+"guests", "Number of Guests", labels, nil),
NumIot: nd(ns+"iots", "Number of IoT Devices", labels, nil),
TxBytesR: nd(ns+"transmit_rate_bytes", "Bytes Transmit Rate", labels, nil),
RxBytesR: nd(ns+"receive_rate_bytes", "Bytes Receive Rate", labels, nil),
NumAp: nd(ns+"aps", "Access Point Count", labels, nil),
NumAdopted: nd(ns+"adopted", "Adoption Count", labels, nil),
NumDisabled: nd(ns+"disabled", "Disabled Count", labels, nil),
NumDisconnected: nd(ns+"disconnected", "Disconnected Count", labels, nil),
NumPending: nd(ns+"pending", "Pending Count", labels, nil),
NumGw: nd(ns+"gateways", "Gateway Count", labels, nil),
NumSw: nd(ns+"switches", "Switch Count", labels, nil),
NumSta: nd(ns+"stations", "Station Count", labels, nil),
Latency: nd(ns+"latency_seconds", "Latency", labels, nil),
Uptime: nd(ns+"uptime_seconds", "Uptime", labels, nil),
Drops: nd(ns+"intenet_drops_total", "Internet (WAN) Disconnections", labels, nil),
XputUp: nd(ns+"xput_up_rate", "Speedtest Upload", labels, nil),
XputDown: nd(ns+"xput_down_rate", "Speedtest Download", labels, nil),
SpeedtestPing: nd(ns+"speedtest_ping", "Speedtest Ping", labels, nil),
RemoteUserNumActive: nd(ns+"remote_user_active", "Remote Users Active", labels, nil),
RemoteUserNumInactive: nd(ns+"remote_user_inactive", "Remote Users Inactive", labels, nil),
RemoteUserRxBytes: nd(ns+"remote_user_receive_bytes_total", "Remote Users Receive Bytes", labels, nil),
RemoteUserTxBytes: nd(ns+"remote_user_transmit_bytes_total", "Remote Users Transmit Bytes", labels, nil),
RemoteUserRxPackets: nd(ns+"remote_user_receive_packets_total", "Remote Users Receive Packets", labels, nil),
RemoteUserTxPackets: nd(ns+"remote_user_transmit_packets_total", "Remote Users Transmit Packets", labels, nil),
DPITxPackets: nd(ns+"dpi_transmit_packets", "Site DPI Transmit Packets", labelDPI, nil),
DPIRxPackets: nd(ns+"dpi_receive_packets", "Site DPI Receive Packets", labelDPI, nil),
DPITxBytes: nd(ns+"dpi_transmit_bytes", "Site DPI Transmit Bytes", labelDPI, nil),
DPIRxBytes: nd(ns+"dpi_receive_bytes", "Site DPI Receive Bytes", labelDPI, nil),
}
}
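// exportSiteDPI exports per-application, per-category DPI counters for a site.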
func (u *promUnifi) exportSiteDPI(r report, s *unifi.DPITable) {
for _, dpi := range s.ByApp {
labelDPI := []string{unifi.DPICats.Get(dpi.Cat), unifi.DPIApps.GetApp(dpi.Cat, dpi.App), s.SiteName, s.SourceName}
// log.Println(labelsDPI, dpi.Cat, dpi.App, dpi.TxBytes, dpi.RxBytes, dpi.TxPackets, dpi.RxPackets)
r.send([]*metric{
{u.Site.DPITxPackets, gauge, dpi.TxPackets, labelDPI},
{u.Site.DPIRxPackets, gauge, dpi.RxPackets, labelDPI},
{u.Site.DPITxBytes, gauge, dpi.TxBytes, labelDPI},
{u.Site.DPIRxBytes, gauge, dpi.RxBytes, labelDPI},
})
}
}
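// exportSite exports one set of health metrics per subsystem (www, wlan, wan, lan, vpn).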
func (u *promUnifi) exportSite(r report, s *unifi.Site) {
for _, h := range s.Health {
switch labels := []string{h.Subsystem, h.Status, s.SiteName, s.SourceName}; labels[0] {
case "www":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
{u.Site.Uptime, gauge, h.Uptime, labels},
{u.Site.Latency, gauge, h.Latency.Val / 1000, labels},
{u.Site.XputUp, gauge, h.XputUp, labels},
{u.Site.XputDown, gauge, h.XputDown, labels},
{u.Site.SpeedtestPing, gauge, h.SpeedtestPing, labels},
{u.Site.Drops, counter, h.Drops, labels},
})
case "wlan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
{u.Site.NumPending, gauge, h.NumPending, labels},
{u.Site.NumUser, gauge, h.NumUser, labels},
{u.Site.NumGuest, gauge, h.NumGuest, labels},
{u.Site.NumIot, gauge, h.NumIot, labels},
{u.Site.NumAp, gauge, h.NumAp, labels},
{u.Site.NumDisabled, gauge, h.NumDisabled, labels},
})
case "wan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
{u.Site.NumPending, gauge, h.NumPending, labels},
{u.Site.NumGw, gauge, h.NumGw, labels},
{u.Site.NumSta, gauge, h.NumSta, labels},
})
case "lan":
r.send([]*metric{
{u.Site.TxBytesR, gauge, h.TxBytesR, labels},
{u.Site.RxBytesR, gauge, h.RxBytesR, labels},
{u.Site.NumAdopted, gauge, h.NumAdopted, labels},
{u.Site.NumDisconnected, gauge, h.NumDisconnected, labels},
{u.Site.NumPending, gauge, h.NumPending, labels},
{u.Site.NumUser, gauge, h.NumUser, labels},
{u.Site.NumGuest, gauge, h.NumGuest, labels},
{u.Site.NumIot, gauge, h.NumIot, labels},
{u.Site.NumSw, gauge, h.NumSw, labels},
})
case "vpn":
r.send([]*metric{
{u.Site.RemoteUserNumActive, gauge, h.RemoteUserNumActive, labels},
{u.Site.RemoteUserNumInactive, gauge, h.RemoteUserNumInactive, labels},
{u.Site.RemoteUserRxBytes, counter, h.RemoteUserRxBytes, labels},
{u.Site.RemoteUserTxBytes, counter, h.RemoteUserTxBytes, labels},
{u.Site.RemoteUserRxPackets, counter, h.RemoteUserRxPackets, labels},
{u.Site.RemoteUserTxPackets, counter, h.RemoteUserTxPackets, labels},
})
}
}
}

View File

@ -1,320 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
type uap struct {
// Ap Traffic Stats
ApWifiTxDropped *prometheus.Desc
ApRxErrors *prometheus.Desc
ApRxDropped *prometheus.Desc
ApRxFrags *prometheus.Desc
ApRxCrypts *prometheus.Desc
ApTxPackets *prometheus.Desc
ApTxBytes *prometheus.Desc
ApTxErrors *prometheus.Desc
ApTxDropped *prometheus.Desc
ApTxRetries *prometheus.Desc
ApRxPackets *prometheus.Desc
ApRxBytes *prometheus.Desc
WifiTxAttempts *prometheus.Desc
MacFilterRejections *prometheus.Desc
// VAP Stats
VAPCcq *prometheus.Desc
VAPMacFilterRejections *prometheus.Desc
VAPNumSatisfactionSta *prometheus.Desc
VAPAvgClientSignal *prometheus.Desc
VAPSatisfaction *prometheus.Desc
VAPSatisfactionNow *prometheus.Desc
VAPDNSAvgLatency *prometheus.Desc
VAPRxBytes *prometheus.Desc
VAPRxCrypts *prometheus.Desc
VAPRxDropped *prometheus.Desc
VAPRxErrors *prometheus.Desc
VAPRxFrags *prometheus.Desc
VAPRxNwids *prometheus.Desc
VAPRxPackets *prometheus.Desc
VAPTxBytes *prometheus.Desc
VAPTxDropped *prometheus.Desc
VAPTxErrors *prometheus.Desc
VAPTxPackets *prometheus.Desc
VAPTxPower *prometheus.Desc
VAPTxRetries *prometheus.Desc
VAPTxCombinedRetries *prometheus.Desc
VAPTxDataMpduBytes *prometheus.Desc
VAPTxRtsRetries *prometheus.Desc
VAPTxSuccess *prometheus.Desc
VAPTxTotal *prometheus.Desc
VAPTxGoodbytes *prometheus.Desc
VAPTxLatAvg *prometheus.Desc
VAPTxLatMax *prometheus.Desc
VAPTxLatMin *prometheus.Desc
VAPRxGoodbytes *prometheus.Desc
VAPRxLatAvg *prometheus.Desc
VAPRxLatMax *prometheus.Desc
VAPRxLatMin *prometheus.Desc
VAPWifiTxLatencyMovAvg *prometheus.Desc
VAPWifiTxLatencyMovMax *prometheus.Desc
VAPWifiTxLatencyMovMin *prometheus.Desc
VAPWifiTxLatencyMovTotal *prometheus.Desc
VAPWifiTxLatencyMovCount *prometheus.Desc
// Radio Stats
RadioCurrentAntennaGain *prometheus.Desc
RadioHt *prometheus.Desc
RadioMaxTxpower *prometheus.Desc
RadioMinTxpower *prometheus.Desc
RadioNss *prometheus.Desc
RadioRadioCaps *prometheus.Desc
RadioTxPower *prometheus.Desc
RadioAstBeXmit *prometheus.Desc
RadioChannel *prometheus.Desc
RadioCuSelfRx *prometheus.Desc
RadioCuSelfTx *prometheus.Desc
RadioExtchannel *prometheus.Desc
RadioGain *prometheus.Desc
RadioNumSta *prometheus.Desc
RadioTxPackets *prometheus.Desc
RadioTxRetries *prometheus.Desc
}
func descUAP(ns string) *uap {
labelA := []string{"stat", "site_name", "name", "source"} // stat + labels[1:]
labelV := []string{"vap_name", "bssid", "radio", "radio_name", "essid", "usage", "site_name", "name", "source"}
labelR := []string{"radio_name", "radio", "site_name", "name", "source"}
nd := prometheus.NewDesc
return &uap{
// 3x each - stat table: total, guest, user
ApWifiTxDropped: nd(ns+"stat_wifi_transmt_dropped_total", "Wifi Transmissions Dropped", labelA, nil),
ApRxErrors: nd(ns+"stat_receive_errors_total", "Receive Errors", labelA, nil),
ApRxDropped: nd(ns+"stat_receive_dropped_total", "Receive Dropped", labelA, nil),
ApRxFrags: nd(ns+"stat_receive_frags_total", "Received Frags", labelA, nil),
ApRxCrypts: nd(ns+"stat_receive_crypts_total", "Receive Crypts", labelA, nil),
ApTxPackets: nd(ns+"stat_transmit_packets_total", "Transmit Packets", labelA, nil),
ApTxBytes: nd(ns+"stat_transmit_bytes_total", "Transmit Bytes", labelA, nil),
ApTxErrors: nd(ns+"stat_transmit_errors_total", "Transmit Errors", labelA, nil),
ApTxDropped: nd(ns+"stat_transmit_dropped_total", "Transmit Dropped", labelA, nil),
ApTxRetries: nd(ns+"stat_retries_tx_total", "Transmit Retries", labelA, nil),
ApRxPackets: nd(ns+"stat_receive_packets_total", "Receive Packets", labelA, nil),
ApRxBytes: nd(ns+"stat_receive_bytes_total", "Receive Bytes", labelA, nil),
WifiTxAttempts: nd(ns+"stat_wifi_transmit_attempts_total", "Wifi Transmission Attempts", labelA, nil),
MacFilterRejections: nd(ns+"stat_mac_filter_rejects_total", "MAC Filter Rejections", labelA, nil),
// N each - 1 per Virtual AP (VAP)
VAPCcq: nd(ns+"vap_ccq_ratio", "VAP Client Connection Quality", labelV, nil),
VAPMacFilterRejections: nd(ns+"vap_mac_filter_rejects_total", "VAP MAC Filter Rejections", labelV, nil),
VAPNumSatisfactionSta: nd(ns+"vap_satisfaction_stations", "VAP Number Satisfaction Stations", labelV, nil),
VAPAvgClientSignal: nd(ns+"vap_average_client_signal", "VAP Average Client Signal", labelV, nil),
VAPSatisfaction: nd(ns+"vap_satisfaction_ratio", "VAP Satisfaction", labelV, nil),
VAPSatisfactionNow: nd(ns+"vap_satisfaction_now_ratio", "VAP Satisfaction Now", labelV, nil),
VAPDNSAvgLatency: nd(ns+"vap_dns_latency_average_seconds", "VAP DNS Latency Average", labelV, nil),
VAPRxBytes: nd(ns+"vap_receive_bytes_total", "VAP Bytes Received", labelV, nil),
VAPRxCrypts: nd(ns+"vap_receive_crypts_total", "VAP Crypts Received", labelV, nil),
VAPRxDropped: nd(ns+"vap_receive_dropped_total", "VAP Dropped Received", labelV, nil),
VAPRxErrors: nd(ns+"vap_receive_errors_total", "VAP Errors Received", labelV, nil),
VAPRxFrags: nd(ns+"vap_receive_frags_total", "VAP Frags Received", labelV, nil),
VAPRxNwids: nd(ns+"vap_receive_nwids_total", "VAP Nwids Received", labelV, nil),
VAPRxPackets: nd(ns+"vap_receive_packets_total", "VAP Packets Received", labelV, nil),
VAPTxBytes: nd(ns+"vap_transmit_bytes_total", "VAP Bytes Transmitted", labelV, nil),
VAPTxDropped: nd(ns+"vap_transmit_dropped_total", "VAP Dropped Transmitted", labelV, nil),
VAPTxErrors: nd(ns+"vap_transmit_errors_total", "VAP Errors Transmitted", labelV, nil),
VAPTxPackets: nd(ns+"vap_transmit_packets_total", "VAP Packets Transmitted", labelV, nil),
VAPTxPower: nd(ns+"vap_transmit_power", "VAP Transmit Power", labelV, nil),
VAPTxRetries: nd(ns+"vap_transmit_retries_total", "VAP Retries Transmitted", labelV, nil),
VAPTxCombinedRetries: nd(ns+"vap_transmit_retries_combined_total", "VAP Retries Combined Tx", labelV, nil),
VAPTxDataMpduBytes: nd(ns+"vap_data_mpdu_transmit_bytes_total", "VAP Data MPDU Bytes Tx", labelV, nil),
VAPTxRtsRetries: nd(ns+"vap_transmit_rts_retries_total", "VAP RTS Retries Transmitted", labelV, nil),
VAPTxSuccess: nd(ns+"vap_transmit_success_total", "VAP Success Transmits", labelV, nil),
VAPTxTotal: nd(ns+"vap_transmit_total", "VAP Transmit Total", labelV, nil),
VAPTxGoodbytes: nd(ns+"vap_transmit_goodbyes", "VAP Goodbytes Transmitted", labelV, nil),
VAPTxLatAvg: nd(ns+"vap_transmit_latency_average_seconds", "VAP Latency Average Tx", labelV, nil),
VAPTxLatMax: nd(ns+"vap_transmit_latency_maximum_seconds", "VAP Latency Maximum Tx", labelV, nil),
VAPTxLatMin: nd(ns+"vap_transmit_latency_minimum_seconds", "VAP Latency Minimum Tx", labelV, nil),
VAPRxGoodbytes: nd(ns+"vap_receive_goodbyes", "VAP Goodbytes Received", labelV, nil),
VAPRxLatAvg: nd(ns+"vap_receive_latency_average_seconds", "VAP Latency Average Rx", labelV, nil),
VAPRxLatMax: nd(ns+"vap_receive_latency_maximum_seconds", "VAP Latency Maximum Rx", labelV, nil),
VAPRxLatMin: nd(ns+"vap_receive_latency_minimum_seconds", "VAP Latency Minimum Rx", labelV, nil),
VAPWifiTxLatencyMovAvg: nd(ns+"vap_transmit_latency_moving_avg_seconds", "VAP Latency Moving Avg Tx", labelV, nil),
VAPWifiTxLatencyMovMax: nd(ns+"vap_transmit_latency_moving_max_seconds", "VAP Latency Moving Max Tx", labelV, nil),
VAPWifiTxLatencyMovMin: nd(ns+"vap_transmit_latency_moving_min_seconds", "VAP Latency Moving Min Tx", labelV, nil),
VAPWifiTxLatencyMovTotal: nd(ns+"vap_transmit_latency_moving_total", "VAP Latency Moving Total Transmit", labelV, nil),
VAPWifiTxLatencyMovCount: nd(ns+"vap_transmit_latency_moving_count", "VAP Latency Moving Count Transmit", labelV, nil),
// N each - 1 per Radio. 1-4 radios per AP usually
RadioCurrentAntennaGain: nd(ns+"radio_current_antenna_gain", "Radio Current Antenna Gain", labelR, nil),
RadioHt: nd(ns+"radio_ht", "Radio HT", labelR, nil),
RadioMaxTxpower: nd(ns+"radio_max_transmit_power", "Radio Maximum Transmit Power", labelR, nil),
RadioMinTxpower: nd(ns+"radio_min_transmit_power", "Radio Minimum Transmit Power", labelR, nil),
RadioNss: nd(ns+"radio_nss", "Radio Nss", labelR, nil),
RadioRadioCaps: nd(ns+"radio_caps", "Radio Capabilities", labelR, nil),
RadioTxPower: nd(ns+"radio_transmit_power", "Radio Transmit Power", labelR, nil),
RadioAstBeXmit: nd(ns+"radio_ast_be_xmit", "Radio AstBe Transmit", labelR, nil),
RadioChannel: nd(ns+"radio_channel", "Radio Channel", labelR, nil),
RadioCuSelfRx: nd(ns+"radio_channel_utilization_receive_ratio", "Channel Utilization Rx", labelR, nil),
RadioCuSelfTx: nd(ns+"radio_channel_utilization_transmit_ratio", "Channel Utilization Tx", labelR, nil),
RadioExtchannel: nd(ns+"radio_ext_channel", "Radio Ext Channel", labelR, nil),
RadioGain: nd(ns+"radio_gain", "Radio Gain", labelR, nil),
RadioNumSta: nd(ns+"radio_stations", "Radio Total Station Count", append(labelR, "station_type"), nil),
RadioTxPackets: nd(ns+"radio_transmit_packets", "Radio Transmitted Packets", labelR, nil),
RadioTxRetries: nd(ns+"radio_transmit_retries", "Radio Transmit Retries", labelR, nil),
}
}
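// exportUAP exports metrics for one adopted, non-locating access point.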
func (u *promUnifi) exportUAP(r report, d *unifi.UAP) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
u.exportVAPtable(r, labels, d.VapTable)
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta)
u.exportRADtable(r, labels, d.RadioTable, d.RadioTableStats)
r.send([]*metric{
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
}
// The UDM doesn't always expose these stats; callers pass four byte counters: BytesD, TxBytesD, RxBytesD, BytesR.
func (u *promUnifi) exportUAPstats(r report, labels []string, ap *unifi.Ap, bytes ...unifi.FlexInt) {
if ap == nil {
return
}
labelU := []string{"user", labels[1], labels[2], labels[3]}
labelG := []string{"guest", labels[1], labels[2], labels[3]}
r.send([]*metric{
// ap only stuff.
{u.Device.BytesD, counter, bytes[0], labels}, // not sure if these 3 Ds are counters or gauges.
{u.Device.TxBytesD, counter, bytes[1], labels}, // not sure if these 3 Ds are counters or gauges.
{u.Device.RxBytesD, counter, bytes[2], labels}, // not sure if these 3 Ds are counters or gauges.
{u.Device.BytesR, gauge, bytes[3], labels}, // only UAP has this one, and those ^ weird.
// user
{u.UAP.ApWifiTxDropped, counter, ap.UserWifiTxDropped, labelU},
{u.UAP.ApRxErrors, counter, ap.UserRxErrors, labelU},
{u.UAP.ApRxDropped, counter, ap.UserRxDropped, labelU},
{u.UAP.ApRxFrags, counter, ap.UserRxFrags, labelU},
{u.UAP.ApRxCrypts, counter, ap.UserRxCrypts, labelU},
{u.UAP.ApTxPackets, counter, ap.UserTxPackets, labelU},
{u.UAP.ApTxBytes, counter, ap.UserTxBytes, labelU},
{u.UAP.ApTxErrors, counter, ap.UserTxErrors, labelU},
{u.UAP.ApTxDropped, counter, ap.UserTxDropped, labelU},
{u.UAP.ApTxRetries, counter, ap.UserTxRetries, labelU},
{u.UAP.ApRxPackets, counter, ap.UserRxPackets, labelU},
{u.UAP.ApRxBytes, counter, ap.UserRxBytes, labelU},
{u.UAP.WifiTxAttempts, counter, ap.UserWifiTxAttempts, labelU},
{u.UAP.MacFilterRejections, counter, ap.UserMacFilterRejections, labelU},
// guest
{u.UAP.ApWifiTxDropped, counter, ap.GuestWifiTxDropped, labelG},
{u.UAP.ApRxErrors, counter, ap.GuestRxErrors, labelG},
{u.UAP.ApRxDropped, counter, ap.GuestRxDropped, labelG},
{u.UAP.ApRxFrags, counter, ap.GuestRxFrags, labelG},
{u.UAP.ApRxCrypts, counter, ap.GuestRxCrypts, labelG},
{u.UAP.ApTxPackets, counter, ap.GuestTxPackets, labelG},
{u.UAP.ApTxBytes, counter, ap.GuestTxBytes, labelG},
{u.UAP.ApTxErrors, counter, ap.GuestTxErrors, labelG},
{u.UAP.ApTxDropped, counter, ap.GuestTxDropped, labelG},
{u.UAP.ApTxRetries, counter, ap.GuestTxRetries, labelG},
{u.UAP.ApRxPackets, counter, ap.GuestRxPackets, labelG},
{u.UAP.ApRxBytes, counter, ap.GuestRxBytes, labelG},
{u.UAP.WifiTxAttempts, counter, ap.GuestWifiTxAttempts, labelG},
{u.UAP.MacFilterRejections, counter, ap.GuestMacFilterRejections, labelG},
})
}
// UAP VAP Table
func (u *promUnifi) exportVAPtable(r report, labels []string, vt unifi.VapTable) {
// vap table stats
for _, v := range vt {
if !v.Up.Val {
continue
}
labelV := []string{v.Name, v.Bssid, v.Radio, v.RadioName, v.Essid, v.Usage, labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.UAP.VAPCcq, gauge, float64(v.Ccq) / 1000.0, labelV},
{u.UAP.VAPMacFilterRejections, counter, v.MacFilterRejections, labelV},
{u.UAP.VAPNumSatisfactionSta, gauge, v.NumSatisfactionSta, labelV},
{u.UAP.VAPAvgClientSignal, gauge, v.AvgClientSignal.Val, labelV},
{u.UAP.VAPSatisfaction, gauge, v.Satisfaction.Val / 100.0, labelV},
{u.UAP.VAPSatisfactionNow, gauge, v.SatisfactionNow.Val / 100.0, labelV},
{u.UAP.VAPDNSAvgLatency, gauge, v.DNSAvgLatency.Val / 1000, labelV},
{u.UAP.VAPRxBytes, counter, v.RxBytes, labelV},
{u.UAP.VAPRxCrypts, counter, v.RxCrypts, labelV},
{u.UAP.VAPRxDropped, counter, v.RxDropped, labelV},
{u.UAP.VAPRxErrors, counter, v.RxErrors, labelV},
{u.UAP.VAPRxFrags, counter, v.RxFrags, labelV},
{u.UAP.VAPRxNwids, counter, v.RxNwids, labelV},
{u.UAP.VAPRxPackets, counter, v.RxPackets, labelV},
{u.UAP.VAPTxBytes, counter, v.TxBytes, labelV},
{u.UAP.VAPTxDropped, counter, v.TxDropped, labelV},
{u.UAP.VAPTxErrors, counter, v.TxErrors, labelV},
{u.UAP.VAPTxPackets, counter, v.TxPackets, labelV},
{u.UAP.VAPTxPower, gauge, v.TxPower, labelV},
{u.UAP.VAPTxRetries, counter, v.TxRetries, labelV},
{u.UAP.VAPTxCombinedRetries, counter, v.TxCombinedRetries, labelV},
{u.UAP.VAPTxDataMpduBytes, counter, v.TxDataMpduBytes, labelV},
{u.UAP.VAPTxRtsRetries, counter, v.TxRtsRetries, labelV},
{u.UAP.VAPTxTotal, counter, v.TxTotal, labelV},
{u.UAP.VAPTxGoodbytes, counter, v.TxTCPStats.Goodbytes, labelV},
{u.UAP.VAPTxLatAvg, gauge, v.TxTCPStats.LatAvg.Val / 1000, labelV},
{u.UAP.VAPTxLatMax, gauge, v.TxTCPStats.LatMax.Val / 1000, labelV},
{u.UAP.VAPTxLatMin, gauge, v.TxTCPStats.LatMin.Val / 1000, labelV},
{u.UAP.VAPRxGoodbytes, counter, v.RxTCPStats.Goodbytes, labelV},
{u.UAP.VAPRxLatAvg, gauge, v.RxTCPStats.LatAvg.Val / 1000, labelV},
{u.UAP.VAPRxLatMax, gauge, v.RxTCPStats.LatMax.Val / 1000, labelV},
{u.UAP.VAPRxLatMin, gauge, v.RxTCPStats.LatMin.Val / 1000, labelV},
{u.UAP.VAPWifiTxLatencyMovAvg, gauge, v.WifiTxLatencyMov.Avg.Val / 1000, labelV},
{u.UAP.VAPWifiTxLatencyMovMax, gauge, v.WifiTxLatencyMov.Max.Val / 1000, labelV},
{u.UAP.VAPWifiTxLatencyMovMin, gauge, v.WifiTxLatencyMov.Min.Val / 1000, labelV},
{u.UAP.VAPWifiTxLatencyMovTotal, counter, v.WifiTxLatencyMov.Total, labelV}, // not sure if gauge or counter.
{u.UAP.VAPWifiTxLatencyMovCount, counter, v.WifiTxLatencyMov.TotalCount, labelV}, // not sure if gauge or counter.
})
}
}
// UAP Radio Table
func (u *promUnifi) exportRADtable(r report, labels []string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
// radio table
for _, p := range rt {
labelR := []string{p.Name, p.Radio, labels[1], labels[2], labels[3]}
labelRUser := append(labelR, "user")
labelRGuest := append(labelR, "guest")
r.send([]*metric{
{u.UAP.RadioCurrentAntennaGain, gauge, p.CurrentAntennaGain, labelR},
{u.UAP.RadioHt, gauge, p.Ht, labelR},
{u.UAP.RadioMaxTxpower, gauge, p.MaxTxpower, labelR},
{u.UAP.RadioMinTxpower, gauge, p.MinTxpower, labelR},
{u.UAP.RadioNss, gauge, p.Nss, labelR},
{u.UAP.RadioRadioCaps, gauge, p.RadioCaps, labelR},
})
// combine radio table with radio stats table.
for _, t := range rts {
if t.Name != p.Name {
continue
}
r.send([]*metric{
{u.UAP.RadioTxPower, gauge, t.TxPower, labelR},
{u.UAP.RadioAstBeXmit, gauge, t.AstBeXmit, labelR},
{u.UAP.RadioChannel, gauge, t.Channel, labelR},
{u.UAP.RadioCuSelfRx, gauge, t.CuSelfRx.Val / 100.0, labelR},
{u.UAP.RadioCuSelfTx, gauge, t.CuSelfTx.Val / 100.0, labelR},
{u.UAP.RadioExtchannel, gauge, t.Extchannel, labelR},
{u.UAP.RadioGain, gauge, t.Gain, labelR},
{u.UAP.RadioNumSta, gauge, t.GuestNumSta, labelRGuest},
{u.UAP.RadioNumSta, gauge, t.UserNumSta, labelRUser},
{u.UAP.RadioTxPackets, gauge, t.TxPackets, labelR},
{u.UAP.RadioTxRetries, gauge, t.TxRetries, labelR},
})
break
}
}
}

View File

@ -1,131 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
// These are shared by all four device types: UDM, UAP, USG, USW
type unifiDevice struct {
Info *prometheus.Desc
Uptime *prometheus.Desc
Temperature *prometheus.Desc // sw only
TotalMaxPower *prometheus.Desc // sw only
FanLevel *prometheus.Desc // sw only
TotalTxBytes *prometheus.Desc
TotalRxBytes *prometheus.Desc
TotalBytes *prometheus.Desc
BytesR *prometheus.Desc // ap only
BytesD *prometheus.Desc // ap only
TxBytesD *prometheus.Desc // ap only
RxBytesD *prometheus.Desc // ap only
Counter *prometheus.Desc
Loadavg1 *prometheus.Desc
Loadavg5 *prometheus.Desc
Loadavg15 *prometheus.Desc
MemBuffer *prometheus.Desc
MemTotal *prometheus.Desc
MemUsed *prometheus.Desc
CPU *prometheus.Desc
Mem *prometheus.Desc
}
func descDevice(ns string) *unifiDevice {
labels := []string{"type", "site_name", "name", "source"}
infoLabels := []string{"version", "model", "serial", "mac", "ip", "id", "bytes", "uptime"}
return &unifiDevice{
Info: prometheus.NewDesc(ns+"info", "Device Information", append(labels, infoLabels...), nil),
Uptime: prometheus.NewDesc(ns+"uptime_seconds", "Device Uptime", labels, nil),
Temperature: prometheus.NewDesc(ns+"temperature_celsius", "Temperature", labels, nil),
TotalMaxPower: prometheus.NewDesc(ns+"max_power_total", "Total Max Power", labels, nil),
FanLevel: prometheus.NewDesc(ns+"fan_level", "Fan Level", labels, nil),
TotalTxBytes: prometheus.NewDesc(ns+"transmit_bytes_total", "Total Transmitted Bytes", labels, nil),
TotalRxBytes: prometheus.NewDesc(ns+"receive_bytes_total", "Total Received Bytes", labels, nil),
TotalBytes: prometheus.NewDesc(ns+"bytes_total", "Total Bytes Transferred", labels, nil),
BytesR: prometheus.NewDesc(ns+"rate_bytes", "Transfer Rate", labels, nil),
BytesD: prometheus.NewDesc(ns+"d_bytes", "Total Bytes D???", labels, nil),
TxBytesD: prometheus.NewDesc(ns+"d_tranmsit_bytes", "Transmit Bytes D???", labels, nil),
RxBytesD: prometheus.NewDesc(ns+"d_receive_bytes", "Receive Bytes D???", labels, nil),
Counter: prometheus.NewDesc(ns+"stations", "Number of Stations", append(labels, "station_type"), nil),
Loadavg1: prometheus.NewDesc(ns+"load_average_1", "System Load Average 1 Minute", labels, nil),
Loadavg5: prometheus.NewDesc(ns+"load_average_5", "System Load Average 5 Minutes", labels, nil),
Loadavg15: prometheus.NewDesc(ns+"load_average_15", "System Load Average 15 Minutes", labels, nil),
MemUsed: prometheus.NewDesc(ns+"memory_used_bytes", "System Memory Used", labels, nil),
MemTotal: prometheus.NewDesc(ns+"memory_installed_bytes", "System Installed Memory", labels, nil),
MemBuffer: prometheus.NewDesc(ns+"memory_buffer_bytes", "System Memory Buffer", labels, nil),
CPU: prometheus.NewDesc(ns+"cpu_utilization_ratio", "System CPU % Utilized", labels, nil),
Mem: prometheus.NewDesc(ns+"memory_utilization_ratio", "System Memory % Utilized", labels, nil),
}
}
// UDM is a collection of stats from USG, USW and UAP. It has no unique stats.
func (u *promUnifi) exportUDM(r report, d *unifi.UDM) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
// Shared data (all devices do this).
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta, d.NumDesktop, d.NumMobile, d.NumHandheld)
// Switch Data
u.exportUSWstats(r, labels, d.Stat.Sw)
u.exportPRTtable(r, labels, d.PortTable)
// Gateway Data
u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
u.exportUSGstats(r, labels, d.Stat.Gw, d.SpeedtestStatus, d.Uplink)
// Dream Machine System Data.
r.send([]*metric{
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
// Wireless Data - UDM (non-pro) only
if d.Stat.Ap != nil && d.VapTable != nil {
u.exportUAPstats(r, labels, d.Stat.Ap, d.BytesD, d.TxBytesD, d.RxBytesD, d.BytesR)
u.exportVAPtable(r, labels, *d.VapTable)
u.exportRADtable(r, labels, *d.RadioTable, *d.RadioTableStats)
}
}
// shared by all
func (u *promUnifi) exportBYTstats(r report, labels []string, tx, rx unifi.FlexInt) {
r.send([]*metric{
{u.Device.TotalTxBytes, counter, tx, labels},
{u.Device.TotalRxBytes, counter, rx, labels},
{u.Device.TotalBytes, counter, tx.Val + rx.Val, labels},
})
}
// shared by all, pass 2 or 5 stats.
func (u *promUnifi) exportSTAcount(r report, labels []string, stas ...unifi.FlexInt) {
r.send([]*metric{
{u.Device.Counter, gauge, stas[0], append(labels, "user")},
{u.Device.Counter, gauge, stas[1], append(labels, "guest")},
})
if len(stas) > 2 {
r.send([]*metric{
{u.Device.Counter, gauge, stas[2], append(labels, "desktop")},
{u.Device.Counter, gauge, stas[3], append(labels, "mobile")},
{u.Device.Counter, gauge, stas[4], append(labels, "handheld")},
})
}
}
// shared by all
func (u *promUnifi) exportSYSstats(r report, labels []string, s unifi.SysStats, ss unifi.SystemStats) {
r.send([]*metric{
{u.Device.Loadavg1, gauge, s.Loadavg1, labels},
{u.Device.Loadavg5, gauge, s.Loadavg5, labels},
{u.Device.Loadavg15, gauge, s.Loadavg15, labels},
{u.Device.MemUsed, gauge, s.MemUsed, labels},
{u.Device.MemTotal, gauge, s.MemTotal, labels},
{u.Device.MemBuffer, gauge, s.MemBuffer, labels},
{u.Device.CPU, gauge, ss.CPU.Val / 100.0, labels},
{u.Device.Mem, gauge, ss.Mem.Val / 100.0, labels},
})
}

View File

@ -1,144 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
type usg struct {
WanRxPackets *prometheus.Desc
WanRxBytes *prometheus.Desc
WanRxDropped *prometheus.Desc
WanRxErrors *prometheus.Desc
WanTxPackets *prometheus.Desc
WanTxBytes *prometheus.Desc
LanRxPackets *prometheus.Desc
LanRxBytes *prometheus.Desc
LanRxDropped *prometheus.Desc
LanTxPackets *prometheus.Desc
LanTxBytes *prometheus.Desc
WanRxBroadcast *prometheus.Desc
WanRxBytesR *prometheus.Desc
WanRxMulticast *prometheus.Desc
WanSpeed *prometheus.Desc
WanTxBroadcast *prometheus.Desc
WanTxBytesR *prometheus.Desc
WanTxDropped *prometheus.Desc
WanTxErrors *prometheus.Desc
WanTxMulticast *prometheus.Desc
WanBytesR *prometheus.Desc
Latency *prometheus.Desc
UplinkLatency *prometheus.Desc
UplinkSpeed *prometheus.Desc
Runtime *prometheus.Desc
XputDownload *prometheus.Desc
XputUpload *prometheus.Desc
}
func descUSG(ns string) *usg {
labels := []string{"port", "site_name", "name", "source"}
return &usg{
WanRxPackets: prometheus.NewDesc(ns+"wan_receive_packets_total", "WAN Receive Packets Total", labels, nil),
WanRxBytes: prometheus.NewDesc(ns+"wan_receive_bytes_total", "WAN Receive Bytes Total", labels, nil),
WanRxDropped: prometheus.NewDesc(ns+"wan_receive_dropped_total", "WAN Receive Dropped Total", labels, nil),
WanRxErrors: prometheus.NewDesc(ns+"wan_receive_errors_total", "WAN Receive Errors Total", labels, nil),
WanTxPackets: prometheus.NewDesc(ns+"wan_transmit_packets_total", "WAN Transmit Packets Total", labels, nil),
WanTxBytes: prometheus.NewDesc(ns+"wan_transmit_bytes_total", "WAN Transmit Bytes Total", labels, nil),
WanRxBroadcast: prometheus.NewDesc(ns+"wan_receive_broadcast_total", "WAN Receive Broadcast Total", labels, nil),
WanRxBytesR: prometheus.NewDesc(ns+"wan_receive_rate_bytes", "WAN Receive Bytes Rate", labels, nil),
WanRxMulticast: prometheus.NewDesc(ns+"wan_receive_multicast_total", "WAN Receive Multicast Total", labels, nil),
WanSpeed: prometheus.NewDesc(ns+"wan_speed_bps", "WAN Speed", labels, nil),
WanTxBroadcast: prometheus.NewDesc(ns+"wan_transmit_broadcast_total", "WAN Transmit Broadcast Total", labels, nil),
WanTxBytesR: prometheus.NewDesc(ns+"wan_transmit_rate_bytes", "WAN Transmit Bytes Rate", labels, nil),
WanTxDropped: prometheus.NewDesc(ns+"wan_transmit_dropped_total", "WAN Transmit Dropped Total", labels, nil),
WanTxErrors: prometheus.NewDesc(ns+"wan_transmit_errors_total", "WAN Transmit Errors Total", labels, nil),
WanTxMulticast: prometheus.NewDesc(ns+"wan_transmit_multicast_total", "WAN Transmit Multicast Total", labels, nil),
WanBytesR: prometheus.NewDesc(ns+"wan_rate_bytes", "WAN Transfer Rate", labels, nil),
LanRxPackets: prometheus.NewDesc(ns+"lan_receive_packets_total", "LAN Receive Packets Total", labels, nil),
LanRxBytes: prometheus.NewDesc(ns+"lan_receive_bytes_total", "LAN Receive Bytes Total", labels, nil),
LanRxDropped: prometheus.NewDesc(ns+"lan_receive_dropped_total", "LAN Receive Dropped Total", labels, nil),
LanTxPackets: prometheus.NewDesc(ns+"lan_transmit_packets_total", "LAN Transmit Packets Total", labels, nil),
LanTxBytes: prometheus.NewDesc(ns+"lan_transmit_bytes_total", "LAN Transmit Bytes Total", labels, nil),
Latency: prometheus.NewDesc(ns+"speedtest_latency_seconds", "Speedtest Latency", labels, nil),
UplinkLatency: prometheus.NewDesc(ns+"uplink_latency_seconds", "Uplink Latency", labels, nil),
UplinkSpeed: prometheus.NewDesc(ns+"uplink_speed_mbps", "Uplink Speed", labels, nil),
Runtime: prometheus.NewDesc(ns+"speedtest_runtime", "Speedtest Run Time", labels, nil),
XputDownload: prometheus.NewDesc(ns+"speedtest_download", "Speedtest Download Rate", labels, nil),
XputUpload: prometheus.NewDesc(ns+"speedtest_upload", "Speedtest Upload Rate", labels, nil),
}
}
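// exportUSG exports metrics for one adopted, non-locating gateway.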
func (u *promUnifi) exportUSG(r report, d *unifi.USG) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
// Gateway System Data.
u.exportWANPorts(r, labels, d.Wan1, d.Wan2)
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
u.exportUSGstats(r, labels, d.Stat.Gw, d.SpeedtestStatus, d.Uplink)
u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta, d.NumDesktop, d.NumMobile, d.NumHandheld)
r.send([]*metric{
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
}
// Gateway States
func (u *promUnifi) exportUSGstats(r report, labels []string, gw *unifi.Gw, st unifi.SpeedtestStatus, ul unifi.Uplink) {
if gw == nil {
return
}
labelLan := []string{"lan", labels[1], labels[2], labels[3]}
labelWan := []string{"all", labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.USG.LanRxPackets, counter, gw.LanRxPackets, labelLan},
{u.USG.LanRxBytes, counter, gw.LanRxBytes, labelLan},
{u.USG.LanTxPackets, counter, gw.LanTxPackets, labelLan},
{u.USG.LanTxBytes, counter, gw.LanTxBytes, labelLan},
{u.USG.LanRxDropped, counter, gw.LanRxDropped, labelLan},
{u.USG.UplinkLatency, gauge, ul.Latency.Val / 1000, labelWan},
{u.USG.UplinkSpeed, gauge, ul.Speed, labelWan},
// Speed Test Stats
{u.USG.Latency, gauge, st.Latency.Val / 1000, labelWan},
{u.USG.Runtime, gauge, st.Runtime, labelWan},
{u.USG.XputDownload, gauge, st.XputDownload, labelWan},
{u.USG.XputUpload, gauge, st.XputUpload, labelWan},
})
}
// WAN Stats
func (u *promUnifi) exportWANPorts(r report, labels []string, wans ...unifi.Wan) {
for _, wan := range wans {
if !wan.Up.Val {
continue // only record UP interfaces.
}
labelWan := []string{wan.Name, labels[1], labels[2], labels[3]}
r.send([]*metric{
{u.USG.WanRxPackets, counter, wan.RxPackets, labelWan},
{u.USG.WanRxBytes, counter, wan.RxBytes, labelWan},
{u.USG.WanRxDropped, counter, wan.RxDropped, labelWan},
{u.USG.WanRxErrors, counter, wan.RxErrors, labelWan},
{u.USG.WanTxPackets, counter, wan.TxPackets, labelWan},
{u.USG.WanTxBytes, counter, wan.TxBytes, labelWan},
{u.USG.WanRxBroadcast, counter, wan.RxBroadcast, labelWan},
{u.USG.WanRxMulticast, counter, wan.RxMulticast, labelWan},
{u.USG.WanSpeed, gauge, wan.Speed.Val * 1000000, labelWan}, // link speed is a gauge, not a counter.
{u.USG.WanTxBroadcast, counter, wan.TxBroadcast, labelWan},
{u.USG.WanTxBytesR, gauge, wan.TxBytesR, labelWan}, // a rate, exported as a gauge like WanBytesR.
{u.USG.WanTxDropped, counter, wan.TxDropped, labelWan},
{u.USG.WanTxErrors, counter, wan.TxErrors, labelWan},
{u.USG.WanTxMulticast, counter, wan.TxMulticast, labelWan},
{u.USG.WanBytesR, gauge, wan.BytesR, labelWan},
})
}
}

View File

@ -1,194 +0,0 @@
package promunifi
import (
"github.com/prometheus/client_golang/prometheus"
"golift.io/unifi"
)
type usw struct {
// Switch "total" traffic stats
SwRxPackets *prometheus.Desc
SwRxBytes *prometheus.Desc
SwRxErrors *prometheus.Desc
SwRxDropped *prometheus.Desc
SwRxCrypts *prometheus.Desc
SwRxFrags *prometheus.Desc
SwTxPackets *prometheus.Desc
SwTxBytes *prometheus.Desc
SwTxErrors *prometheus.Desc
SwTxDropped *prometheus.Desc
SwTxRetries *prometheus.Desc
SwRxMulticast *prometheus.Desc
SwRxBroadcast *prometheus.Desc
SwTxMulticast *prometheus.Desc
SwTxBroadcast *prometheus.Desc
SwBytes *prometheus.Desc
// Port data.
PoeCurrent *prometheus.Desc
PoePower *prometheus.Desc
PoeVoltage *prometheus.Desc
RxBroadcast *prometheus.Desc
RxBytes *prometheus.Desc
RxBytesR *prometheus.Desc
RxDropped *prometheus.Desc
RxErrors *prometheus.Desc
RxMulticast *prometheus.Desc
RxPackets *prometheus.Desc
Satisfaction *prometheus.Desc
Speed *prometheus.Desc
TxBroadcast *prometheus.Desc
TxBytes *prometheus.Desc
TxBytesR *prometheus.Desc
TxDropped *prometheus.Desc
TxErrors *prometheus.Desc
TxMulticast *prometheus.Desc
TxPackets *prometheus.Desc
}
func descUSW(ns string) *usw {
pns := ns + "port_"
labelS := []string{"site_name", "name", "source"}
labelP := []string{"port_id", "port_num", "port_name", "port_mac", "port_ip", "site_name", "name", "source"}
nd := prometheus.NewDesc
return &usw{
// This data may be derivable by sum()ing the port data.
SwRxPackets: nd(ns+"switch_receive_packets_total", "Switch Packets Received Total", labelS, nil),
SwRxBytes: nd(ns+"switch_receive_bytes_total", "Switch Bytes Received Total", labelS, nil),
SwRxErrors: nd(ns+"switch_receive_errors_total", "Switch Errors Received Total", labelS, nil),
SwRxDropped: nd(ns+"switch_receive_dropped_total", "Switch Dropped Received Total", labelS, nil),
SwRxCrypts: nd(ns+"switch_receive_crypts_total", "Switch Crypts Received Total", labelS, nil),
SwRxFrags: nd(ns+"switch_receive_frags_total", "Switch Frags Received Total", labelS, nil),
SwTxPackets: nd(ns+"switch_transmit_packets_total", "Switch Packets Transmit Total", labelS, nil),
SwTxBytes: nd(ns+"switch_transmit_bytes_total", "Switch Bytes Transmit Total", labelS, nil),
SwTxErrors: nd(ns+"switch_transmit_errors_total", "Switch Errors Transmit Total", labelS, nil),
SwTxDropped: nd(ns+"switch_transmit_dropped_total", "Switch Dropped Transmit Total", labelS, nil),
SwTxRetries: nd(ns+"switch_transmit_retries_total", "Switch Retries Transmit Total", labelS, nil),
SwRxMulticast: nd(ns+"switch_receive_multicast_total", "Switch Multicast Receive Total", labelS, nil),
SwRxBroadcast: nd(ns+"switch_receive_broadcast_total", "Switch Broadcast Receive Total", labelS, nil),
SwTxMulticast: nd(ns+"switch_transmit_multicast_total", "Switch Multicast Transmit Total", labelS, nil),
SwTxBroadcast: nd(ns+"switch_transmit_broadcast_total", "Switch Broadcast Transmit Total", labelS, nil),
SwBytes: nd(ns+"switch_bytes_total", "Switch Bytes Transferred Total", labelS, nil),
// per-port data
PoeCurrent: nd(pns+"poe_amperes", "POE Current", labelP, nil),
PoePower: nd(pns+"poe_watts", "POE Power", labelP, nil),
PoeVoltage: nd(pns+"poe_volts", "POE Voltage", labelP, nil),
RxBroadcast: nd(pns+"receive_broadcast_total", "Receive Broadcast", labelP, nil),
RxBytes: nd(pns+"receive_bytes_total", "Total Receive Bytes", labelP, nil),
RxBytesR: nd(pns+"receive_rate_bytes", "Receive Bytes Rate", labelP, nil),
RxDropped: nd(pns+"receive_dropped_total", "Total Receive Dropped", labelP, nil),
RxErrors: nd(pns+"receive_errors_total", "Total Receive Errors", labelP, nil),
RxMulticast: nd(pns+"receive_multicast_total", "Total Receive Multicast", labelP, nil),
RxPackets: nd(pns+"receive_packets_total", "Total Receive Packets", labelP, nil),
Satisfaction: nd(pns+"satisfaction_ratio", "Satisfaction", labelP, nil),
Speed: nd(pns+"port_speed_bps", "Speed", labelP, nil),
TxBroadcast: nd(pns+"transmit_broadcast_total", "Total Transmit Broadcast", labelP, nil),
TxBytes: nd(pns+"transmit_bytes_total", "Total Transmit Bytes", labelP, nil),
TxBytesR: nd(pns+"transmit_rate_bytes", "Transmit Bytes Rate", labelP, nil),
TxDropped: nd(pns+"transmit_dropped_total", "Total Transmit Dropped", labelP, nil),
TxErrors: nd(pns+"transmit_errors_total", "Total Transmit Errors", labelP, nil),
TxMulticast: nd(pns+"transmit_multicast_total", "Total Transmit Multicast", labelP, nil),
TxPackets: nd(pns+"transmit_packets_total", "Total Transmit Packets", labelP, nil),
}
}
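// exportUSW exports metrics for one adopted, non-locating switch.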
func (u *promUnifi) exportUSW(r report, d *unifi.USW) {
if !d.Adopted.Val || d.Locating.Val {
return
}
labels := []string{d.Type, d.SiteName, d.Name, d.SourceName}
infoLabels := []string{d.Version, d.Model, d.Serial, d.Mac, d.IP, d.ID, d.Bytes.Txt, d.Uptime.Txt}
u.exportUSWstats(r, labels, d.Stat.Sw)
u.exportPRTtable(r, labels, d.PortTable)
u.exportBYTstats(r, labels, d.TxBytes, d.RxBytes)
u.exportSYSstats(r, labels, d.SysStats, d.SystemStats)
u.exportSTAcount(r, labels, d.UserNumSta, d.GuestNumSta)
r.send([]*metric{
{u.Device.Info, gauge, 1.0, append(labels, infoLabels...)},
{u.Device.Uptime, gauge, d.Uptime, labels},
})
// Switch System Data.
if d.HasTemperature.Val {
r.send([]*metric{{u.Device.Temperature, gauge, d.GeneralTemperature, labels}})
}
if d.HasFan.Val {
r.send([]*metric{{u.Device.FanLevel, gauge, d.FanLevel, labels}})
}
if d.TotalMaxPower.Txt != "" {
r.send([]*metric{{u.Device.TotalMaxPower, gauge, d.TotalMaxPower, labels}})
}
}
// Switch Stats
func (u *promUnifi) exportUSWstats(r report, labels []string, sw *unifi.Sw) {
if sw == nil {
return
}
labelS := labels[1:]
r.send([]*metric{
{u.USW.SwRxPackets, counter, sw.RxPackets, labelS},
{u.USW.SwRxBytes, counter, sw.RxBytes, labelS},
{u.USW.SwRxErrors, counter, sw.RxErrors, labelS},
{u.USW.SwRxDropped, counter, sw.RxDropped, labelS},
{u.USW.SwRxCrypts, counter, sw.RxCrypts, labelS},
{u.USW.SwRxFrags, counter, sw.RxFrags, labelS},
{u.USW.SwTxPackets, counter, sw.TxPackets, labelS},
{u.USW.SwTxBytes, counter, sw.TxBytes, labelS},
{u.USW.SwTxErrors, counter, sw.TxErrors, labelS},
{u.USW.SwTxDropped, counter, sw.TxDropped, labelS},
{u.USW.SwTxRetries, counter, sw.TxRetries, labelS},
{u.USW.SwRxMulticast, counter, sw.RxMulticast, labelS},
{u.USW.SwRxBroadcast, counter, sw.RxBroadcast, labelS},
{u.USW.SwTxMulticast, counter, sw.TxMulticast, labelS},
{u.USW.SwTxBroadcast, counter, sw.TxBroadcast, labelS},
{u.USW.SwBytes, counter, sw.Bytes, labelS},
})
}
// Switch Port Table
func (u *promUnifi) exportPRTtable(r report, labels []string, pt []unifi.Port) {
// Per-port data on a switch
for _, p := range pt {
if !p.Up.Val || !p.Enable.Val {
continue
}
// Copy the shared labels and add five port-specific ones.
labelP := []string{labels[2] + " Port " + p.PortIdx.Txt, p.PortIdx.Txt,
p.Name, p.Mac, p.IP, labels[1], labels[2], labels[3]}
if p.PoeEnable.Val && p.PortPoe.Val {
r.send([]*metric{
{u.USW.PoeCurrent, gauge, p.PoeCurrent, labelP},
{u.USW.PoePower, gauge, p.PoePower, labelP},
{u.USW.PoeVoltage, gauge, p.PoeVoltage, labelP},
})
}
r.send([]*metric{
{u.USW.RxBroadcast, counter, p.RxBroadcast, labelP},
{u.USW.RxBytes, counter, p.RxBytes, labelP},
{u.USW.RxBytesR, gauge, p.RxBytesR, labelP},
{u.USW.RxDropped, counter, p.RxDropped, labelP},
{u.USW.RxErrors, counter, p.RxErrors, labelP},
{u.USW.RxMulticast, counter, p.RxMulticast, labelP},
{u.USW.RxPackets, counter, p.RxPackets, labelP},
{u.USW.Satisfaction, gauge, p.Satisfaction.Val / 100.0, labelP},
{u.USW.Speed, gauge, p.Speed.Val * 1000000, labelP},
{u.USW.TxBroadcast, counter, p.TxBroadcast, labelP},
{u.USW.TxBytes, counter, p.TxBytes, labelP},
{u.USW.TxBytesR, gauge, p.TxBytesR, labelP},
{u.USW.TxDropped, counter, p.TxDropped, labelP},
{u.USW.TxErrors, counter, p.TxErrors, labelP},
{u.USW.TxMulticast, counter, p.TxMulticast, labelP},
{u.USW.TxPackets, counter, p.TxPackets, labelP},
})
}
}

View File

@ -1,26 +0,0 @@
# MySQL Output Plugin Example

The code here, and the dynamic plugin provided, show an example of how you can
write your own output for unifi-poller. This plugin records some very basic
data about clients on a UniFi network into a MySQL database.

You could write outputs that do almost anything. For example, an output could
compare the currently connected clients to a previous list (kept in a database
or in memory) and send a notification when the list changes; a rough sketch of
that idea follows. The possibilities are endless.
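
The sketch below is illustrative only, not part of this example. It reuses only the
pieces demonstrated in the mysql plugin further down (`poller.NewOutput`, the
`Config`/`Method` registration, `cnfg.Duration`, and `Collect.Logf`); the `[clientwatch]`
config section and the `currentClientMACs` helper are made up for illustration, and a
real plugin would wire that helper to the poller's collected metrics or the unifi library.

```
package main

import (
	"time"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
	"golift.io/cnfg"
)

// watchConfig is unmarshalled from a hypothetical [clientwatch] section in up.conf.
type watchConfig struct {
	Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
}

// watcher remembers which client MACs were seen on earlier passes.
type watcher struct {
	Config watchConfig `json:"clientwatch" toml:"clientwatch" xml:"clientwatch" yaml:"clientwatch"`
	seen   map[string]bool
}

func init() {
	w := &watcher{seen: make(map[string]bool)}
	poller.NewOutput(&poller.Output{Name: "clientwatch", Config: w, Method: w.Run})
}

// main is required so `make plugins` can build this file; it is never run directly.
func main() {}

// Run blocks forever, checking for new clients once per interval.
func (w *watcher) Run(c poller.Collect) error {
	interval := w.Config.Interval.Duration
	if interval == 0 {
		interval = 5 * time.Minute // default when up.conf leaves it unset.
	}

	for range time.Tick(interval) {
		for _, mac := range currentClientMACs() {
			if !w.seen[mac] {
				w.seen[mac] = true
				c.Logf("new client connected: " + mac) // swap this for a webhook, email, etc.
			}
		}
	}

	return nil
}

// currentClientMACs is a placeholder; replace it with a real data source,
// such as the poller's collected metrics or the unifi library.
func currentClientMACs() []string { return nil }
```

Drop a file like that into `plugins/` and build it with the same `make plugins` steps shown below.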

You must compile your plugin against the unifi-poller source for the version
you're running. For example, to build a plugin for version 2.0.1:
```
mkdir -p $GOPATH/src/github.com/davidnewhall
cd $GOPATH/src/github.com/davidnewhall
git clone git@github.com:davidnewhall/unifi-poller.git
cd unifi-poller
git checkout v2.0.1
make vendor
cp -r <your plugin> plugins/
GOOS=linux make plugins
```
The plugin you copy in *must* have a `main.go` file for `make plugins` to build it.

View File

@ -1,45 +0,0 @@
package main

import (
	"fmt"

	"github.com/davidnewhall/unifi-poller/pkg/poller"
	"golift.io/cnfg"
)

// mysqlConfig represents the data that is unmarshalled from the up.conf config file for this plugin.
type mysqlConfig struct {
	Interval cnfg.Duration `json:"interval" toml:"interval" xml:"interval" yaml:"interval"`
	Host     string        `json:"host" toml:"host" xml:"host" yaml:"host"`
	User     string        `json:"user" toml:"user" xml:"user" yaml:"user"`
	Pass     string        `json:"pass" toml:"pass" xml:"pass" yaml:"pass"`
	DB       string        `json:"db" toml:"db" xml:"db" yaml:"db"`
	Table    string        `json:"table" toml:"table" xml:"table" yaml:"table"`
	// Maps do not work with ENV VARIABLES yet, but may in the future.
	Fields []string `json:"fields" toml:"fields" xml:"field" yaml:"fields"`
}

// Pointers are ignored during ENV variable unmarshaling; avoid pointers to your config.
// Only capital (exported) members are unmarshaled when passed into poller.NewOutput().
type plugin struct {
	Config mysqlConfig `json:"mysql" toml:"mysql" xml:"mysql" yaml:"mysql"`
}

func init() {
	u := &plugin{Config: mysqlConfig{}}
	poller.NewOutput(&poller.Output{
		Name:   "mysql",
		Config: u, // pass in the struct *above* your config (so it can see the struct tags).
		Method: u.Run,
	})
}

func main() {
	fmt.Println("this is a unifi-poller plugin; not an application")
}

func (a *plugin) Run(c poller.Collect) error {
	c.Logf("mysql plugin is not finished")
	return nil
}

View File

@ -1,11 +0,0 @@
#!/bin/bash
# This file is used by deb and rpm packages.
# FPM adds this as the after-install script.
if [ -x "/bin/systemctl" ]; then
# Reload and restart - this starts the application as user nobody.
/bin/systemctl daemon-reload
/bin/systemctl enable unifi-poller
/bin/systemctl restart unifi-poller
fi

View File

@ -1,12 +0,0 @@
#!/bin/bash
# This file is used by rpm and deb packages. FPM adds this as the before-remove script.
if [ "$1" = "upgrade" ] || [ "$1" = "1" ] ; then
  exit 0
fi

if [ -x "/bin/systemctl" ]; then
  /bin/systemctl stop unifi-poller
  /bin/systemctl disable unifi-poller
fi

View File

@ -1,41 +0,0 @@
#!/bin/bash -x
# Deploys a new homebrew formula file to a github homebrew formula repo: $HBREPO
# Requires SSH credentials in ssh-agent to work.
# Run by Travis-CI when a new release is created on GitHub.
# Do not edit this file. It's part of application-builder.
# https://github.com/golift/application-builder
source .metadata.sh
make ${BINARY}.rb
git config --global user.email "${BINARY}@auto.releaser"
git config --global user.name "${BINARY}-auto-releaser"
rm -rf homebrew_release_repo
git clone git@github.com:${HBREPO}.git homebrew_release_repo
# If a bitly token file exists, we'll use that to shorten the link (and allow download counting).
if [ -f "bitly_token" ]; then
API=https://api-ssl.bitly.com/v4/bitlinks
# Request payload. In single quotes with double quotes escaped. :see_no_evil:
JSON='{\"domain\": \"bit.ly\",\"title\": \"${BINARY}.v${VERSION}-${ITERATION}.tgz\", \
\"tags\": [\"${BINARY}\"], \"long_url\": \"${SOURCE_PATH}\"}'
# Request with headers and data. Using bash -c to hide token from bash -x in travis logs.
OUT=$(bash -c "curl -s -X POST -H 'Content-type: application/json' ${API} -H \"\$(<bitly_token)\" -d \"${JSON}\"")
# Extract link from reply.
LINK="$(echo ${OUT} | jq -r .link | sed 's/http:/https:/')?v=v${VERSION}"
# Replace link in formula.
sed "s#^ url.*\$# url \"${LINK}\"#" ${BINARY}.rb > ${BINARY}.rb.new
if [ "$?" = "0" ] && [ "$LINK" != "null?v=v${VERSION}" ] && [ "$LINK" != "?v=v${VERSION}" ]; then
mv ${BINARY}.rb.new ${BINARY}.rb
fi
fi
cp ${BINARY}.rb homebrew_release_repo/Formula
pushd homebrew_release_repo
git add Formula/${BINARY}.rb
git commit -m "Update ${BINARY} on Release: v${VERSION}-${ITERATION}"
git push
popd

View File

@ -1,96 +0,0 @@
#!/bin/bash
# This is a quick and dirty script to install the latest Linux package.
#
# Use it like this: (sudo is optional)
# ===
# curl https://raw.githubusercontent.com/davidnewhall/unifi-poller/master/scripts/install.sh | sudo bash
# ===
# If you're on Red Hat, this installs the latest rpm. If you're on Debian, it installs the latest deb package.
#
# This is part of application-builder.
# https://github.com/golift/application-builder
REPO=davidnewhall/unifi-poller
LATEST=https://api.github.com/repos/${REPO}/releases/latest
ARCH=$(uname -m)
# $ARCH is passed into egrep to find the right file.
if [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
ARCH="x86_64|amd64"
elif [[ $ARCH == *386* ]] || [[ $ARCH == *686* ]]; then
ARCH="i386"
elif [[ $ARCH == *arm64* ]] || [[ $ARCH == *armv8* ]]; then
ARCH="arm64"
elif [[ $ARCH == *armv6* ]] || [[ $ARCH == *armv7* ]]; then
ARCH="armhf"
else
echo "Unknown Architecture. Submit a pull request to fix this, please."
echo ==> $ARCH
exit 1
fi
if [ "$1" == "deb" ] || [ "$1" == "rpm" ]; then
FILE=$1
else
# If you have both, rpm wins.
rpm --version > /dev/null 2>&1
if [ "$?" = "0" ]; then
FILE=rpm
else
dpkg --version > /dev/null 2>&1
if [ "$?" = "0" ]; then
FILE=deb
fi
fi
fi
if [ "$FILE" = "" ]; then
echo "No dpkg or rpm package managers found!"
exit 1
fi

# curl or wget?
curl --version > /dev/null 2>&1
if [ "$?" = "0" ]; then
  CMD="curl -L"
else
  wget --version > /dev/null 2>&1
  if [ "$?" = "0" ]; then
    CMD="wget -O-"
  fi
fi

if [ "$CMD" = "" ]; then
  echo "Need curl or wget - could not find either!"
  exit 1
fi
# Grab latest release file from github.
URL=$($CMD ${LATEST} | egrep "browser_download_url.*(${ARCH})\.${FILE}\"" | cut -d\" -f 4)
if [ "$?" != "0" ] || [ "$URL" = "" ]; then
echo "Error locating latest release at ${LATEST}"
exit 1
fi
INSTALLER="rpm -Uvh"
if [ "$FILE" = "deb" ]; then
INSTALLER="dpkg --force-confdef --force-confold --install"
fi
FILE=$(basename ${URL})
echo "Downloading: ${URL} to /tmp/${FILE}"
$CMD ${URL} > /tmp/${FILE}
# Install it.
if [ "$(id -u)" = "0" ]; then
echo "==================================="
echo "Downloaded. Installing the package!"
echo "Running: ${INSTALLER} /tmp/${FILE}"
$INSTALLER /tmp/${FILE}
else
echo "================================"
echo "Downloaded. Install the package:"
echo "sudo $INSTALLER /tmp/${FILE}"
fi

View File

@ -1,33 +0,0 @@
#!/bin/bash
# Deploy our built packages to JFrog Bintray.
COMPONENT=unstable
if [ "$TRAVIS_BRANCH" == "$TRAVIS_TAG" ] && [ "$TRAVIS_BRANCH" != "" ]; then
  COMPONENT=main
fi
echo "deploying packages from branch: $TRAVIS_BRANCH, tag: $TRAVIS_TAG to repo: $COMPONENT"
source .metadata.sh
for os in el centos; do
  for arch in arm64 armhf x86_64 i386; do
    file="unifi-poller-${VERSION}-${ITERATION}.${arch}.rpm"
    opts="publish=1;override=1"
    url="https://api.bintray.com/content/golift/${os}/unifi-poller/${VERSION}-${ITERATION}/${COMPONENT}/${arch}/${file}"
    echo curl -T "release/${file}" "${url};${opts}"
    curl -T "release/${file}" -u "${JFROG_USER_API_KEY}" "${url};${opts}"
    echo
  done
done

for os in ubuntu debian; do
  for arch in arm64 armhf amd64 i386; do
    file="unifi-poller_${VERSION}-${ITERATION}_${arch}.deb"
    opts="deb_distribution=xenial,bionic,focal,jessie,stretch,buster,bullseye;deb_component=${COMPONENT};deb_architecture=${arch};publish=1;override=1"
    url="https://api.bintray.com/content/golift/${os}/unifi-poller/${VERSION}-${ITERATION}/${file}"
    echo curl -T "release/${file}" "${url};${opts}"
    curl -T "release/${file}" -u "${JFROG_USER_API_KEY}" "${url};${opts}"
    echo
  done
done