Merge branch 'master' of ../datadogunifi into merge-them-all
This commit is contained in:
commit
ae6e408be4
|
|
@ -0,0 +1,17 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
.vscode/
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
vendor/
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.18.x
|
||||
before_install:
|
||||
# download super-linter: golangci-lint
|
||||
- curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin latest
|
||||
script:
|
||||
- go test ./...
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2020 Cody Lee
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
# datadogunifi
|
||||
|
||||
UniFi Poller Output Plugin for DataDog
|
||||
|
||||
## Configuration
|
||||
|
||||
```yaml
|
||||
datadog:
|
||||
# How often to poll UniFi and report to Datadog.
|
||||
interval: "2m"
|
||||
|
||||
# To disable this output plugin
|
||||
disable: false
|
||||
|
||||
# Datadog Custom Options
|
||||
|
||||
# address to talk to the datadog agent, by default this uses the local statsd UDP interface
|
||||
# address: "..."
|
||||
|
||||
# namespace to prepend to all data
|
||||
# namespace: ""
|
||||
|
||||
# tags to append to all data
|
||||
# tags:
|
||||
# - foo
|
||||
|
||||
# max_bytes_per_payload is the maximum number of bytes a single payload will contain.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 1432 for UDP and 8192 for UDS.
|
||||
# max_bytes_per_payload: 0
|
||||
|
||||
# max_messages_per_payload is the maximum number of metrics, events and/or service checks a single payload will contain.
|
||||
# This option can be set to `1` to create an unbuffered client.
|
||||
# max_messages_per_payload: 0
|
||||
|
||||
# BufferPoolSize is the size of the pool of buffers in number of buffers.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 2048 for UDP and 512 for UDS.
|
||||
# buffer_pool_size: 0
|
||||
|
||||
# buffer_flush_interval is the interval after which the current buffer will get flushed.
|
||||
# buffer_flush_interval: 0
|
||||
|
||||
# buffer_shard_count is the number of buffer "shards" that will be used.
|
||||
# Those shards allows the use of multiple buffers at the same time to reduce
|
||||
# lock contention.
|
||||
# buffer_shard_count: 0
|
||||
|
||||
# sender_queue_size is the size of the sender queue in number of buffers.
|
||||
# The magic value 0 will set the option to the optimal size for the transport
|
||||
# protocol used when creating the client: 2048 for UDP and 512 for UDS.
|
||||
# sender_queue_size: 0
|
||||
|
||||
# write_timeout_uds is the timeout after which a UDS packet is dropped.
|
||||
# write_timeout_uds: 5000
|
||||
|
||||
# receive_mode determines the behavior of the client when receiving too many
|
||||
# metrics. The client will either drop the metrics if its buffers are
|
||||
# full (ChannelMode mode) or block the caller until the metric can be
|
||||
# handled (MutexMode mode). By default the client will use MutexMode. This
|
||||
# option should be set to ChannelMode only when used under very high
|
||||
# load.
|
||||
#
|
||||
# MutexMode uses a mutex internally which is much faster than
|
||||
# channel but causes some lock contention when used with a high number
|
||||
# of threads. Mutex are sharded based on the metrics name which
|
||||
# limit mutex contention when goroutines send different metrics.
|
||||
#
|
||||
# ChannelMode: uses channel (of ChannelModeBufferSize size) to send
|
||||
# metrics and drop metrics if the channel is full. Sending metrics in
|
||||
# this mode is slower than MutexMode (because of the channel), but
|
||||
# will not block the application. This mode is made for application
|
||||
# using many goroutines, sending the same metrics at a very high
|
||||
# volume. The goal is to not slow down the application at the cost of
|
||||
# dropping metrics and having a lower max throughput.
|
||||
# receive_mode: 0
|
||||
|
||||
# channel_mode_buffer_size is the size of the channel holding incoming metrics
|
||||
# channel_mode_buffer_size: 0
|
||||
|
||||
# aggregation_flush_interval is the interval for the aggregator to flush metrics
|
||||
# aggregation_flush_interval: 0
|
||||
```
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// Counter names used by report.addCount when tallying how many of each
// item type was processed during a reporting cycle.
const (
	alarmT   = item("Alarm")
	anomalyT = item("Anomaly")
)
|
||||
|
||||
// batchAlarms generates alarm events and logs for Datadog.
// Alarms older than one reporting interval (plus a second of slack) are
// dropped so a restart does not replay events already reported.
func (u *DatadogUnifi) batchAlarms(r report, event *unifi.Alarm) { // nolint:dupl
	if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Tag the event with everything known about it, including source and
	// destination GeoIP details when the controller supplied them.
	tagMap := map[string]string{
		"dst_port":              strconv.Itoa(event.DestPort),
		"src_port":              strconv.Itoa(event.SrcPort),
		"dest_ip":               event.DestIP,
		"dst_mac":               event.DstMAC,
		"host":                  event.Host,
		"msg":                   event.Msg,
		"src_ip":                event.SrcIP,
		"src_mac":               event.SrcMAC,
		"dst_ip_asn":            fmt.Sprintf("%d", event.DestIPGeo.Asn),
		"dst_ip_latitude":       fmt.Sprintf("%0.6f", event.DestIPGeo.Latitude),
		"dst_ip_longitude":      fmt.Sprintf("%0.6f", event.DestIPGeo.Longitude),
		"dst_ip_city":           event.DestIPGeo.City,
		"dst_ip_continent_code": event.DestIPGeo.ContinentCode,
		"dst_ip_country_code":   event.DestIPGeo.CountryCode,
		"dst_ip_country_name":   event.DestIPGeo.CountryName,
		"dst_ip_organization":   event.DestIPGeo.Organization,
		"src_ip_asn":            fmt.Sprintf("%d", event.SourceIPGeo.Asn),
		"src_ip_latitude":       fmt.Sprintf("%0.6f", event.SourceIPGeo.Latitude),
		"src_ip_longitude":      fmt.Sprintf("%0.6f", event.SourceIPGeo.Longitude),
		"src_ip_city":           event.SourceIPGeo.City,
		"src_ip_continent_code": event.SourceIPGeo.ContinentCode,
		"src_ip_country_code":   event.SourceIPGeo.CountryCode,
		"src_ip_country_name":   event.SourceIPGeo.CountryName,
		"src_ip_organization":   event.SourceIPGeo.Organization,
		"site_name":             event.SiteName,
		"source":                event.SourceName,
		"in_iface":              event.InIface,
		"event_type":            event.EventType,
		"subsystem":             event.Subsystem,
		"archived":              event.Archived.Txt,
		"usg_ip":                event.USGIP,
		"proto":                 event.Proto,
		"key":                   event.Key,
		"catname":               event.Catname,
		"app_proto":             event.AppProto,
		"action":                event.InnerAlertAction,
	}
	r.addCount(alarmT)

	// cleanTags presumably strips empty/placeholder values — confirm in cleanTags.
	tagMap = cleanTags(tagMap)
	tags := tagMapToTags(tagMap)
	title := fmt.Sprintf("[%s][%s] Alarm at %s from %s", event.EventType, event.Catname, event.SiteName, event.SourceName)
	// Emit both a Datadog event and a warning log line for the alarm.
	r.reportEvent(title, event.Datetime, event.Msg, tags)
	r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Msg, tagMapToSimpleStrings(tagMap)))
}
|
||||
|
||||
// batchAnomaly generates Anomalies from UniFi for Datadog.
|
||||
func (u *DatadogUnifi) batchAnomaly(r report, event *unifi.Anomaly) {
|
||||
if time.Since(event.Datetime) > u.Interval.Duration+time.Second {
|
||||
return // The event is older than our interval, ignore it.
|
||||
}
|
||||
|
||||
r.addCount(anomalyT)
|
||||
|
||||
tagMap := cleanTags(map[string]string{
|
||||
"application": "unifi_anomaly",
|
||||
"source": event.SourceName,
|
||||
"site_name": event.SiteName,
|
||||
"device_mac": event.DeviceMAC,
|
||||
})
|
||||
tags := tagMapToTags(tagMap)
|
||||
|
||||
title := fmt.Sprintf("Anomaly detected at %s from %s", event.SiteName, event.SourceName)
|
||||
r.reportEvent(title, event.Datetime, event.Anomaly, tags)
|
||||
r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", event.Datetime.Unix(), title, event.Anomaly, tagMapToSimpleStrings(tagMap)))
|
||||
}
|
||||
|
|
@ -0,0 +1,189 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// batchClient generates Unifi Client datapoints for Datadog.
// These points can be passed directly to Datadog.
func (u *DatadogUnifi) batchClient(r report, s *unifi.Client) { // nolint: funlen
	// Identity tags: who and where this client is (names, MACs, radio,
	// VLAN, wired/wireless flags, and the device it is attached to).
	tags := map[string]string{
		"mac":          s.Mac,
		"site_name":    s.SiteName,
		"source":       s.SourceName,
		"ap_name":      s.ApName,
		"gw_name":      s.GwName,
		"sw_name":      s.SwName,
		"oui":          s.Oui,
		"radio_name":   s.RadioName,
		"radio":        s.Radio,
		"radio_proto":  s.RadioProto,
		"name":         s.Name,
		"fixed_ip":     s.FixedIP,
		"sw_port":      s.SwPort.Txt,
		"os_class":     s.OsClass.Txt,
		"os_name":      s.OsName.Txt,
		"dev_cat":      s.DevCat.Txt,
		"dev_id":       s.DevID.Txt,
		"dev_vendor":   s.DevVendor.Txt,
		"dev_family":   s.DevFamily.Txt,
		"is_wired":     s.IsWired.Txt,
		"is_guest":     s.IsGuest.Txt,
		"use_fixed_ip": s.UseFixedIP.Txt,
		"channel":      s.Channel.Txt,
		"vlan":         s.Vlan.Txt,
		// NOTE(review): "hostname" duplicates "name" (both s.Name) — confirm intended.
		"hostname": s.Name,
		"essid":    s.Essid,
		"bssid":    s.Bssid,
		"ip":       s.IP,
	}
	// Datadog gauges only take floats, so booleans become 0.0/1.0.
	powerSaveEnabled := 0.0
	if s.PowersaveEnabled.Val {
		powerSaveEnabled = 1.0
	}
	// Numeric measurements for this client, all coerced to float64 gauges.
	data := map[string]float64{
		"anomalies":         float64(s.Anomalies),
		"channel":           s.Channel.Val,
		"satisfaction":      s.Satisfaction.Val,
		"bytes_r":           float64(s.BytesR),
		"ccq":               float64(s.Ccq),
		"noise":             float64(s.Noise),
		"powersave_enabled": powerSaveEnabled,
		"roam_count":        float64(s.RoamCount),
		"rssi":              float64(s.Rssi),
		"rx_bytes":          float64(s.RxBytes),
		"rx_bytes_r":        float64(s.RxBytesR),
		"rx_packets":        float64(s.RxPackets),
		"rx_rate":           float64(s.RxRate),
		"signal":            float64(s.Signal),
		"tx_bytes":          float64(s.TxBytes),
		"tx_bytes_r":        float64(s.TxBytesR),
		"tx_packets":        float64(s.TxPackets),
		"tx_retries":        float64(s.TxRetries),
		"tx_power":          float64(s.TxPower),
		"tx_rate":           float64(s.TxRate),
		"uptime":            float64(s.Uptime),
		"wifi_tx_attempts":  float64(s.WifiTxAttempts),
		"wired_rx_bytes":    float64(s.WiredRxBytes),
		"wired_rx_bytes-r":  float64(s.WiredRxBytesR),
		"wired_rx_packets":  float64(s.WiredRxPackets),
		"wired_tx_bytes":    float64(s.WiredTxBytes),
		"wired_tx_bytes-r":  float64(s.WiredTxBytesR),
		"wired_tx_packets":  float64(s.WiredTxPackets),
	}

	metricName := metricNamespace("clients")

	reportGaugeForFloat64Map(r, metricName, data, tags)
}
|
||||
|
||||
// totalsDPImap: controller, site, name (app/cat name), dpi.
type totalsDPImap map[string]map[string]map[string]unifi.DPIData

// batchClientDPI reports per-client DPI (deep packet inspection) gauges for
// Datadog, and accumulates per-application and per-category totals into the
// provided maps for later reporting by reportClientDPItotals.
// v must be a *unifi.DPITable; anything else is logged and skipped.
func (u *DatadogUnifi) batchClientDPI(r report, v interface{}, appTotal, catTotal totalsDPImap) {
	s, ok := v.(*unifi.DPITable)
	if !ok {
		u.LogErrorf("invalid type given to batchClientDPI: %T", v)
		return
	}

	for _, dpi := range s.ByApp {
		category := unifi.DPICats.Get(dpi.Cat)
		application := unifi.DPIApps.GetApp(dpi.Cat, dpi.App)
		// Accumulate running totals keyed controller -> site -> app/category name.
		fillDPIMapTotals(appTotal, application, s.SourceName, s.SiteName, dpi)
		fillDPIMapTotals(catTotal, category, s.SourceName, s.SiteName, dpi)

		tags := map[string]string{
			"category":    category,
			"application": application,
			"name":        s.Name,
			"mac":         s.MAC,
			"site_name":   s.SiteName,
			"source":      s.SourceName,
		}

		data := map[string]float64{
			"tx_packets": float64(dpi.TxPackets),
			"rx_packets": float64(dpi.RxPackets),
			"tx_bytes":   float64(dpi.TxBytes),
			"rx_bytes":   float64(dpi.RxBytes),
		}

		metricName := metricNamespace("client_dpi")

		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
||||
// fillDPIMapTotals fills in totals for categories and applications. maybe clients too.
|
||||
// This allows less processing in Datadog to produce total transfer data per cat or app.
|
||||
func fillDPIMapTotals(m totalsDPImap, name, controller, site string, dpi unifi.DPIData) {
|
||||
if m[controller] == nil {
|
||||
m[controller] = make(map[string]map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
if m[controller][site] == nil {
|
||||
m[controller][site] = make(map[string]unifi.DPIData)
|
||||
}
|
||||
|
||||
existing := m[controller][site][name]
|
||||
existing.TxPackets += dpi.TxPackets
|
||||
existing.RxPackets += dpi.RxPackets
|
||||
existing.TxBytes += dpi.TxBytes
|
||||
existing.RxBytes += dpi.RxBytes
|
||||
m[controller][site][name] = existing
|
||||
}
|
||||
|
||||
func reportClientDPItotals(r report, appTotal, catTotal totalsDPImap) {
|
||||
type all []struct {
|
||||
kind string
|
||||
val totalsDPImap
|
||||
}
|
||||
|
||||
// This produces 7000+ metrics per site. Disabled for now.
|
||||
if appTotal != nil {
|
||||
appTotal = nil
|
||||
}
|
||||
|
||||
// This can allow us to aggregate other data types later, like `name` or `mac`, or anything else unifi adds.
|
||||
a := all{
|
||||
// This produces 7000+ metrics per site. Disabled for now.
|
||||
{
|
||||
kind: "application",
|
||||
val: appTotal,
|
||||
},
|
||||
{
|
||||
kind: "category",
|
||||
val: catTotal,
|
||||
},
|
||||
}
|
||||
|
||||
for _, k := range a {
|
||||
for controller, s := range k.val {
|
||||
for site, c := range s {
|
||||
for name, m := range c {
|
||||
tags := map[string]string{
|
||||
"category": "TOTAL",
|
||||
"application": "TOTAL",
|
||||
"name": "TOTAL",
|
||||
"mac": "TOTAL",
|
||||
"site_name": site,
|
||||
"source": controller,
|
||||
}
|
||||
tags[k.kind] = name
|
||||
|
||||
data := map[string]float64{
|
||||
"tx_packets": float64(m.TxPackets),
|
||||
"rx_packets": float64(m.RxPackets),
|
||||
"tx_bytes": float64(m.TxBytes),
|
||||
"rx_bytes": float64(m.RxBytes),
|
||||
}
|
||||
|
||||
metricName := metricNamespace("client_dpi")
|
||||
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,361 @@
|
|||
// Package datadogunifi provides the methods to turn UniFi measurements into Datadog
|
||||
// data points with appropriate tags and fields.
|
||||
package datadogunifi
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/unpoller/poller"
|
||||
"github.com/unpoller/unifi"
|
||||
"golift.io/cnfg"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultInterval = 30 * time.Second
|
||||
minimumInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
// Config defines the data needed to store metrics in Datadog.
type Config struct {
	// Required Config

	// Interval controls the collection and reporting interval.
	Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval,omitempty" yaml:"interval,omitempty"`

	// DeadPorts: save data for dead ports? ie. ports that are down or disabled.
	DeadPorts bool `json:"dead_ports,omitempty" toml:"dead_ports,omitempty" xml:"dead_ports,omitempty" yaml:"dead_ports,omitempty"`

	// Disable when true disables this output plugin.
	Disable *bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
	// Address determines how to talk to the Datadog agent.
	Address string `json:"address" toml:"address" xml:"address,attr" yaml:"address"`

	// Optional Statsd Options - mirrored from statsd.Options

	// Namespace to prepend to all metrics, events and service checks name.
	Namespace *string `json:"namespace" toml:"namespace" xml:"namespace,attr" yaml:"namespace"`

	// Tags are global tags to be applied to every metrics, events and service checks.
	Tags []string `json:"tags" toml:"tags" xml:"tags,attr" yaml:"tags"`

	// MaxBytesPerPayload is the maximum number of bytes a single payload will contain.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 1432 for UDP and 8192 for UDS.
	MaxBytesPerPayload *int `json:"max_bytes_per_payload" toml:"max_bytes_per_payload" xml:"max_bytes_per_payload,attr" yaml:"max_bytes_per_payload"`

	// MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
	// This option can be set to `1` to create an unbuffered client.
	MaxMessagesPerPayload *int `json:"max_messages_per_payload" toml:"max_messages_per_payload" xml:"max_messages_per_payload,attr" yaml:"max_messages_per_payload"`

	// BufferPoolSize is the size of the pool of buffers in number of buffers.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
	BufferPoolSize *int `json:"buffer_pool_size" toml:"buffer_pool_size" xml:"buffer_pool_size,attr" yaml:"buffer_pool_size"`

	// BufferFlushInterval is the interval after which the current buffer will get flushed.
	BufferFlushInterval *cnfg.Duration `json:"buffer_flush_interval" toml:"buffer_flush_interval" xml:"buffer_flush_interval,attr" yaml:"buffer_flush_interval"`

	// BufferShardCount is the number of buffer "shards" that will be used.
	// Those shards allow the use of multiple buffers at the same time to reduce
	// lock contention.
	BufferShardCount *int `json:"buffer_shard_count" toml:"buffer_shard_count" xml:"buffer_shard_count,attr" yaml:"buffer_shard_count"`

	// SenderQueueSize is the size of the sender queue in number of buffers.
	// The magic value 0 will set the option to the optimal size for the transport
	// protocol used when creating the client: 2048 for UDP and 512 for UDS.
	SenderQueueSize *int `json:"sender_queue_size" toml:"sender_queue_size" xml:"sender_queue_size,attr" yaml:"sender_queue_size"`

	// WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
	WriteTimeoutUDS *cnfg.Duration `json:"write_timeout_uds" toml:"write_timeout_uds" xml:"write_timeout_uds,attr" yaml:"write_timeout_uds"`

	// ReceiveMode determines the behavior of the client when receiving too many
	// metrics. The client will either drop the metrics if its buffers are
	// full (ChannelMode mode) or block the caller until the metric can be
	// handled (MutexMode mode). By default the client will use MutexMode. This
	// option should be set to ChannelMode only when used under very high
	// load.
	//
	// MutexMode uses a mutex internally which is much faster than
	// channel but causes some lock contention when used with a high number
	// of threads. Mutexes are sharded based on the metric name, which
	// limits mutex contention when goroutines send different metrics.
	//
	// ChannelMode: uses a channel (of ChannelModeBufferSize size) to send
	// metrics and drops metrics if the channel is full. Sending metrics in
	// this mode is slower than MutexMode (because of the channel), but
	// will not block the application. This mode is made for applications
	// using many goroutines, sending the same metrics at a very high
	// volume. The goal is to not slow down the application at the cost of
	// dropping metrics and having a lower max throughput.
	ReceiveMode *statsd.ReceivingMode `json:"receive_mode" toml:"receive_mode" xml:"receive_mode,attr" yaml:"receive_mode"`

	// ChannelModeBufferSize is the size of the channel holding incoming metrics.
	ChannelModeBufferSize *int `json:"channel_mode_buffer_size" toml:"channel_mode_buffer_size" xml:"channel_mode_buffer_size,attr" yaml:"channel_mode_buffer_size"`

	// AggregationFlushInterval is the interval for the aggregator to flush metrics.
	AggregationFlushInterval *time.Duration `json:"aggregation_flush_interval" toml:"aggregation_flush_interval" xml:"aggregation_flush_interval,attr" yaml:"aggregation_flush_interval"`
}
|
||||
|
||||
// Datadog allows the data to be context aware with configuration.
type Datadog struct {
	// Config holds the user-supplied settings, unmarshaled by the poller core.
	*Config `json:"datadog" toml:"datadog" xml:"datadog" yaml:"datadog"`
	// options is derived from Config by setConfigDefaults().
	options []statsd.Option // nolint
}
|
||||
|
||||
// DatadogUnifi is returned by New() after you provide a Config.
type DatadogUnifi struct {
	// Collector is the poller core, used to fetch metrics and events.
	Collector poller.Collect
	// datadog is the statsd client; created in Run().
	datadog statsd.ClientInterface
	// LastCheck records the time of the most recent poll tick.
	LastCheck time.Time
	*Datadog
}
|
||||
|
||||
// init registers this output plugin with the poller core at import time.
func init() { // nolint: gochecknoinits
	u := &DatadogUnifi{Datadog: &Datadog{}, LastCheck: time.Now()}

	poller.NewOutput(&poller.Output{
		Name:   "datadog",
		Config: u.Datadog,
		Method: u.Run,
	})
}
|
||||
|
||||
func (u *DatadogUnifi) setConfigDefaults() {
|
||||
if u.Interval.Duration == 0 {
|
||||
u.Interval = cnfg.Duration{Duration: defaultInterval}
|
||||
} else if u.Interval.Duration < minimumInterval {
|
||||
u.Interval = cnfg.Duration{Duration: minimumInterval}
|
||||
}
|
||||
|
||||
u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
|
||||
|
||||
u.options = make([]statsd.Option, 0)
|
||||
|
||||
if u.Namespace != nil {
|
||||
u.options = append(u.options, statsd.WithNamespace(*u.Namespace))
|
||||
}
|
||||
|
||||
if u.Tags != nil && len(u.Tags) > 0 {
|
||||
u.options = append(u.options, statsd.WithTags(u.Tags))
|
||||
}
|
||||
|
||||
if u.MaxBytesPerPayload != nil {
|
||||
u.options = append(u.options, statsd.WithMaxBytesPerPayload(*u.MaxBytesPerPayload))
|
||||
}
|
||||
|
||||
if u.MaxMessagesPerPayload != nil {
|
||||
u.options = append(u.options, statsd.WithMaxMessagesPerPayload(*u.MaxMessagesPerPayload))
|
||||
}
|
||||
|
||||
if u.BufferPoolSize != nil {
|
||||
u.options = append(u.options, statsd.WithBufferPoolSize(*u.BufferPoolSize))
|
||||
}
|
||||
|
||||
if u.BufferFlushInterval != nil {
|
||||
u.options = append(u.options, statsd.WithBufferFlushInterval((*u.BufferFlushInterval).Duration))
|
||||
}
|
||||
|
||||
if u.BufferShardCount != nil {
|
||||
u.options = append(u.options, statsd.WithBufferShardCount(*u.BufferShardCount))
|
||||
}
|
||||
|
||||
if u.SenderQueueSize != nil {
|
||||
u.options = append(u.options, statsd.WithSenderQueueSize(*u.SenderQueueSize))
|
||||
}
|
||||
|
||||
if u.WriteTimeoutUDS != nil {
|
||||
u.options = append(u.options, statsd.WithWriteTimeoutUDS((*u.WriteTimeoutUDS).Duration))
|
||||
}
|
||||
|
||||
if u.ReceiveMode != nil {
|
||||
switch *u.ReceiveMode {
|
||||
case statsd.ChannelMode:
|
||||
u.options = append(u.options, statsd.WithChannelMode())
|
||||
case statsd.MutexMode:
|
||||
u.options = append(u.options, statsd.WithMutexMode())
|
||||
}
|
||||
}
|
||||
|
||||
if u.ChannelModeBufferSize != nil {
|
||||
u.options = append(u.options, statsd.WithChannelModeBufferSize(*u.ChannelModeBufferSize))
|
||||
}
|
||||
|
||||
if u.AggregationFlushInterval != nil {
|
||||
u.options = append(u.options, statsd.WithAggregationInterval(*u.AggregationFlushInterval))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Run runs a ticker to poll the unifi server and update Datadog.
|
||||
func (u *DatadogUnifi) Run(c poller.Collect) error {
|
||||
u.Collector = c
|
||||
disabled := u.Disable == nil || *u.Disable == true
|
||||
if disabled {
|
||||
u.LogDebugf("Datadog config is disabled, output is disabled.")
|
||||
return nil
|
||||
}
|
||||
if u.Config == nil && !disabled {
|
||||
u.LogErrorf("DataDog config is missing and is not disabled: Datadog output is disabled!")
|
||||
return nil
|
||||
}
|
||||
u.Logf("Datadog is configured.")
|
||||
u.setConfigDefaults()
|
||||
|
||||
var err error
|
||||
u.datadog, err = statsd.New(u.Address, u.options...)
|
||||
if err != nil {
|
||||
u.LogErrorf("Error configuration Datadog agent reporting: %+v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
u.PollController()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PollController runs forever, polling UniFi and pushing to Datadog
|
||||
// This is started by Run() or RunBoth() after everything is validated.
|
||||
func (u *DatadogUnifi) PollController() {
|
||||
interval := u.Interval.Round(time.Second)
|
||||
ticker := time.NewTicker(interval)
|
||||
u.Logf("Everything checks out! Poller started, interval=%+v", interval)
|
||||
|
||||
for u.LastCheck = range ticker.C {
|
||||
metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
|
||||
if err != nil {
|
||||
u.LogErrorf("metric fetch for Datadog failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
|
||||
if err != nil {
|
||||
u.LogErrorf("event fetch for Datadog failed", err)
|
||||
continue
|
||||
}
|
||||
|
||||
report, err := u.ReportMetrics(metrics, events)
|
||||
if err != nil {
|
||||
// Is the agent down?
|
||||
u.LogErrorf("unable to report metrics and events", err)
|
||||
report.reportCount("unifi.collect.errors", 1, []string{})
|
||||
continue
|
||||
}
|
||||
report.reportCount("unifi.collect.success", 1, []string{})
|
||||
u.LogDatadogReport(report)
|
||||
}
|
||||
}
|
||||
|
||||
// ReportMetrics batches all device and client data into datadog data points.
// Call this after you've collected all the data you care about.
// Returns an error if datadog statsd calls fail, otherwise returns a report.
// NOTE(review): the current implementation always returns a nil error.
func (u *DatadogUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
	r := &Report{
		Metrics:   m,
		Events:    e,
		Start:     time.Now(),
		Counts:    &Counts{Val: make(map[item]int)},
		Collector: u.Collector,
		client:    u.datadog,
	}
	// batch all the points.
	u.loopPoints(r)
	// Record how long this reporting cycle took and emit it as a timing.
	r.End = time.Now()
	r.Elapsed = r.End.Sub(r.Start)
	r.reportTiming("unifi.collector_timing", r.Elapsed, []string{})

	return r, nil
}
|
||||
|
||||
// loopPoints collects all the data to immediately report to Datadog.
|
||||
func (u *DatadogUnifi) loopPoints(r report) {
|
||||
m := r.metrics()
|
||||
|
||||
for _, s := range m.RogueAPs {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.Sites {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.SitesDPI {
|
||||
u.reportSiteDPI(r, s.(*unifi.DPITable))
|
||||
}
|
||||
|
||||
for _, s := range m.Clients {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range m.Devices {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
for _, s := range r.events().Logs {
|
||||
u.switchExport(r, s)
|
||||
}
|
||||
|
||||
appTotal := make(totalsDPImap)
|
||||
catTotal := make(totalsDPImap)
|
||||
|
||||
for _, s := range m.ClientsDPI {
|
||||
u.batchClientDPI(r, s, appTotal, catTotal)
|
||||
}
|
||||
|
||||
reportClientDPItotals(r, appTotal, catTotal)
|
||||
}
|
||||
|
||||
// switchExport dispatches a single metric or event entity to the batch
// method matching its concrete type. Unknown types are logged and dropped.
func (u *DatadogUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
	switch v := v.(type) {
	case *unifi.RogueAP:
		u.batchRogueAP(r, v)
	case *unifi.UAP:
		u.batchUAP(r, v)
	case *unifi.USW:
		u.batchUSW(r, v)
	case *unifi.USG:
		u.batchUSG(r, v)
	case *unifi.UXG:
		u.batchUXG(r, v)
	case *unifi.UDM:
		u.batchUDM(r, v)
	case *unifi.Site:
		u.reportSite(r, v)
	case *unifi.Client:
		u.batchClient(r, v)
	case *unifi.Event:
		u.batchEvent(r, v)
	case *unifi.IDS:
		u.batchIDS(r, v)
	case *unifi.Alarm:
		u.batchAlarms(r, v)
	case *unifi.Anomaly:
		u.batchAnomaly(r, v)
	default:
		u.LogErrorf("invalid export, type=%+v", reflect.TypeOf(v))
	}
}
|
||||
|
||||
// LogDatadogReport writes a log message after exporting to Datadog.
|
||||
func (u *DatadogUnifi) LogDatadogReport(r *Report) {
|
||||
m := r.Metrics
|
||||
u.Logf("UniFi Metrics Recorded num_sites=%d num_sites_dpi=%d num_clients=%d num_clients_dpi=%d num_rogue_ap=%d num_devices=%d errors=%v elapsec=%v",
|
||||
len(m.Sites),
|
||||
len(m.SitesDPI),
|
||||
len(m.Clients),
|
||||
len(m.ClientsDPI),
|
||||
len(m.RogueAPs),
|
||||
len(m.Devices),
|
||||
r.Errors,
|
||||
r.Elapsed,
|
||||
)
|
||||
metricName := metricNamespace("collector")
|
||||
r.reportCount(metricName("num_sites"), int64(len(m.Sites)), u.Tags)
|
||||
r.reportCount(metricName("num_sites_dpi"), int64(len(m.SitesDPI)), u.Tags)
|
||||
r.reportCount(metricName("num_clients"), int64(len(m.Clients)), u.Tags)
|
||||
r.reportCount(metricName("num_clients_dpi"), int64(len(m.ClientsDPI)), u.Tags)
|
||||
r.reportCount(metricName("num_rogue_ap"), int64(len(m.RogueAPs)), u.Tags)
|
||||
r.reportCount(metricName("num_devices"), int64(len(m.Devices)), u.Tags)
|
||||
r.reportCount(metricName("num_errors"), int64(len(r.Errors)), u.Tags)
|
||||
r.reportTiming(metricName("elapsed_time"), r.Elapsed, u.Tags)
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// These constants are used as names for printed/logged counters.
const (
	eventT = item("Event") // UniFi event counter name.
	idsT   = item("IDS")   // Intrusion-detection counter name.
)
|
||||
|
||||
// batchIDS generates intrusion detection datapoints for Datadog.
// Events older than one polling interval (plus one second of slack) are
// dropped so restarts and overlapping polls do not re-report history.
// Each IDS record becomes one Datadog event plus a warning log line.
func (u *DatadogUnifi) batchIDS(r report, i *unifi.IDS) { // nolint:dupl
	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Every interesting field of the IDS record becomes a Datadog tag;
	// empty values are stripped by cleanTags below.
	tagMap := map[string]string{
		"dest_port":             strconv.Itoa(i.DestPort),
		"src_port":              strconv.Itoa(i.SrcPort),
		"dest_ip":               i.DestIP,
		"dst_mac":               i.DstMAC,
		"host":                  i.Host,
		"msg":                   i.Msg,
		"src_ip":                i.SrcIP,
		"src_mac":               i.SrcMAC,
		"dst_ip_asn":            fmt.Sprintf("%d", i.DestIPGeo.Asn),
		"dst_ip_latitude":       fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
		"dst_ip_longitude":      fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
		"dst_ip_city":           i.DestIPGeo.City,
		"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
		"dst_ip_country_code":   i.DestIPGeo.CountryCode,
		"dst_ip_country_name":   i.DestIPGeo.CountryName,
		"dst_ip_organization":   i.DestIPGeo.Organization,
		"src_ip_asn":            fmt.Sprintf("%d", i.SourceIPGeo.Asn),
		"src_ip_latitude":       fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
		"src_ip_longitude":      fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
		"src_ip_city":           i.SourceIPGeo.City,
		"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
		"src_ip_country_code":   i.SourceIPGeo.CountryCode,
		"src_ip_country_name":   i.SourceIPGeo.CountryName,
		"src_ip_organization":   i.SourceIPGeo.Organization,
		"site_name":             i.SiteName,
		"source":                i.SourceName,
		"in_iface":              i.InIface,
		"event_type":            i.EventType,
		"subsystem":             i.Subsystem,
		"archived":              i.Archived.Txt,
		"usg_ip":                i.USGIP,
		"proto":                 i.Proto,
		"key":                   i.Key,
		"catname":               i.Catname,
		"app_proto":             i.AppProto,
		"action":                i.InnerAlertAction,
	}

	// Count this IDS record for the end-of-run summary.
	r.addCount(idsT)

	tagMap = cleanTags(tagMap)
	tags := tagMapToTags(tagMap)
	title := fmt.Sprintf("Intrusion Detection at %s from %s", i.SiteName, i.SourceName)
	// Submit as a Datadog event and mirror it to the warn log.
	r.reportEvent(title, i.Datetime, i.Msg, tags)
	r.reportWarnLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
}
|
||||
|
||||
// batchEvent generates one Datadog event (plus an info log line) from a
// UniFi controller event. Events older than one polling interval (plus
// one second of slack) are dropped so history is not re-reported.
func (u *DatadogUnifi) batchEvent(r report, i *unifi.Event) { // nolint: funlen
	if time.Since(i.Datetime) > u.Interval.Duration+time.Second {
		return // The event is older than our interval, ignore it.
	}

	// Every interesting field of the event becomes a Datadog tag;
	// empty values are stripped by cleanTags below.
	tagMap := map[string]string{
		"guest":                 i.Guest, // mac address
		"user":                  i.User,  // mac address
		"host":                  i.Host,  // usg device?
		"hostname":              i.Hostname, // client name
		"dest_port":             strconv.Itoa(i.DestPort),
		"src_port":              strconv.Itoa(i.SrcPort),
		"dst_ip":                i.DestIP,
		"dst_mac":               i.DstMAC,
		"ip":                    i.IP,
		"src_ip":                i.SrcIP,
		"src_mac":               i.SrcMAC,
		"dst_ip_asn":            fmt.Sprintf("%d", i.DestIPGeo.Asn),
		"dst_ip_latitude":       fmt.Sprintf("%0.6f", i.DestIPGeo.Latitude),
		"dst_ip_longitude":      fmt.Sprintf("%0.6f", i.DestIPGeo.Longitude),
		"dst_ip_city":           i.DestIPGeo.City,
		"dst_ip_continent_code": i.DestIPGeo.ContinentCode,
		"dst_ip_country_code":   i.DestIPGeo.CountryCode,
		"dst_ip_country_name":   i.DestIPGeo.CountryName,
		"dst_ip_organization":   i.DestIPGeo.Organization,
		"src_ip_asn":            fmt.Sprintf("%d", i.SourceIPGeo.Asn),
		"src_ip_latitude":       fmt.Sprintf("%0.6f", i.SourceIPGeo.Latitude),
		"src_ip_longitude":      fmt.Sprintf("%0.6f", i.SourceIPGeo.Longitude),
		"src_ip_city":           i.SourceIPGeo.City,
		"src_ip_continent_code": i.SourceIPGeo.ContinentCode,
		"src_ip_country_code":   i.SourceIPGeo.CountryCode,
		"src_ip_country_name":   i.SourceIPGeo.CountryName,
		"src_ip_organization":   i.SourceIPGeo.Organization,
		"admin":                 i.Admin, // username
		"site_name":             i.SiteName,
		"source":                i.SourceName,
		"ap_from":               i.ApFrom,
		"ap_to":                 i.ApTo,
		"ap":                    i.Ap,
		"ap_name":               i.ApName,
		"gw":                    i.Gw,
		"gw_name":               i.GwName,
		"sw":                    i.Sw,
		"sw_name":               i.SwName,
		"catname":               i.Catname,
		"radio":                 i.Radio,
		"radio_from":            i.RadioFrom,
		"radio_to":              i.RadioTo,
		"key":                   i.Key,
		"in_iface":              i.InIface,
		"event_type":            i.EventType,
		"subsystem":             i.Subsystem,
		"ssid":                  i.SSID,
		"is_admin":              i.IsAdmin.Txt,
		"channel":               i.Channel.Txt,
		"channel_from":          i.ChannelFrom.Txt,
		"channel_to":            i.ChannelTo.Txt,
		"usg_ip":                i.USGIP,
		"network":               i.Network,
		"app_proto":             i.AppProto,
		"proto":                 i.Proto,
		"action":                i.InnerAlertAction,
	}

	// Count this event for the end-of-run summary.
	r.addCount(eventT)

	tagMap = cleanTags(tagMap)
	tags := tagMapToTags(tagMap)
	title := fmt.Sprintf("Unifi Event at %s from %s", i.SiteName, i.SourceName)
	// Submit as a Datadog event and mirror it to the info log.
	r.reportEvent(title, i.Datetime, i.Msg, tags)
	r.reportInfoLog(fmt.Sprintf("[%d] %s: %s - %s", i.Datetime.Unix(), title, i.Msg, tagMapToSimpleStrings(tagMap)))
}
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
module github.com/unpoller/datadogunifi
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e
|
||||
github.com/unpoller/unifi v0.0.0-20221124010147-8d83427af67b
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect
|
||||
golift.io/cnfg v0.0.7
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
)
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible h1:Dq8Dr+4sV1gBO1sHDWdW+4G+PdsA+YSJOK925MxrrCY=
|
||||
github.com/DataDog/datadog-go v4.0.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c h1:zqmyTlQyufRC65JnImJ6H1Sf7BDj8bG31EV919NVEQc=
|
||||
github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e h1:tNBIBCmtc7whuhkjKyEzpU3OHzYHyGCBy/LERhHxh3A=
|
||||
github.com/unpoller/poller v0.0.0-20210623104748-50161c195d5e/go.mod h1:AbDp60t5WlLSRELAliMJ0RFQpm/0yXpyolVSZqNtero=
|
||||
github.com/unpoller/unifi v0.0.0-20221124010147-8d83427af67b h1:QMDlntRuc73sVRBMa3VO82p9gDzyub4sgLxaKwK4nHo=
|
||||
github.com/unpoller/unifi v0.0.0-20221124010147-8d83427af67b/go.mod h1:pJGPtjikPcYO+rZMpgYOj6Zs044Dl4R+u3MsV3TMenk=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golift.io/cnfg v0.0.7 h1:qkNpP5Bq+5Gtoc6HcI8kapMD5zFOVan6qguxqBQF3OY=
|
||||
golift.io/cnfg v0.0.7/go.mod h1:AsB0DJe7nv0bizKaoy3e3MjjOF7upTpMOMvsfv4CNNk=
|
||||
golift.io/version v0.0.2 h1:i0gXRuSDHKs4O0sVDUg4+vNIuOxYoXhaxspftu2FRTE=
|
||||
golift.io/version v0.0.2/go.mod h1:76aHNz8/Pm7CbuxIsDi97jABL5Zui3f2uZxDm4vB6hU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
package datadogunifi
|
||||
|
||||
// Logf logs a message.
|
||||
func (u *DatadogUnifi) Logf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.Logf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// LogErrorf logs an error message.
|
||||
func (u *DatadogUnifi) LogErrorf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.LogErrorf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// LogDebugf logs a debug message.
|
||||
func (u *DatadogUnifi) LogDebugf(msg string, v ...interface{}) {
|
||||
if u.Collector != nil {
|
||||
u.Collector.LogDebugf(msg, v...)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// tag renders a single Datadog tag in "name:value" form.
func tag(name string, value interface{}) string {
	return name + ":" + fmt.Sprint(value)
}
|
||||
|
||||
func tagMapToTags(tagMap map[string]string) []string {
|
||||
tags := make([]string, 0)
|
||||
for k, v := range tagMap {
|
||||
tags = append(tags, tag(k, v))
|
||||
}
|
||||
return tags
|
||||
}
|
||||
|
||||
// tagMapToSimpleStrings renders a tag map as a human-readable
// `key="value", key="value"` string for log lines. Pair order follows
// (random) map iteration order, as before. The original built the
// string via repeated Sprintf concatenation (quadratic); this collects
// the parts and joins them once.
func tagMapToSimpleStrings(tagMap map[string]string) string {
	parts := make([]string, 0, len(tagMap))
	for k, v := range tagMap {
		parts = append(parts, fmt.Sprintf("%s=\"%v\"", k, v))
	}

	return strings.Join(parts, ", ")
}
|
||||
|
||||
// metricNamespace returns a naming helper that prefixes every metric
// name with "unifi.<namespace>.".
func metricNamespace(namespace string) func(string) string {
	prefix := "unifi." + namespace + "."

	return func(name string) string {
		return fmt.Sprintf("%s%s", prefix, name)
	}
}
|
||||
|
||||
func reportGaugeForFloat64Map(r report, metricName func(string) string, data map[string]float64, tags map[string]string) {
|
||||
for name, value := range data {
|
||||
r.reportGauge(metricName(name), value, tagMapToTags(tags))
|
||||
}
|
||||
}
|
||||
|
||||
// cleanTags strips every entry whose value is empty, mutating the
// given map in place and returning it for convenience.
func cleanTags(tags map[string]string) map[string]string {
	for key, value := range tags {
		if value == "" {
			delete(tags, key)
		}
	}

	return tags
}
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/unpoller/poller"
|
||||
)
|
||||
|
||||
// Report holds the data for one collection run and satisfies the
// report interface used by the batch*/report* functions in this package.
type Report struct {
	Metrics *poller.Metrics // Metrics gathered from the controller this run.
	Events  *poller.Events  // Events gathered from the controller this run.
	Errors  []error         // Errors accumulated via error() during the run.
	Counts  *Counts         // Named counters incremented via addCount().
	Start   time.Time       // When the run began.
	End     time.Time       // When the run finished.
	Elapsed time.Duration   // End minus Start.

	Collector poller.Collect // Used for info/warn logging.

	Total  int // Total datapoints, if tracked by the caller.
	Fields int // Total fields, if tracked by the caller.

	wg sync.WaitGroup // Tracks in-flight goroutines via add()/done().

	client statsd.ClientInterface // Destination Datadog statsd client.
}
|
||||
|
||||
// Counts holds counters and has a lock to deal with routines.
// Val maps a counter name (item) to its running total.
type Counts struct {
	Val map[item]int
	sync.RWMutex
}
|
||||
|
||||
// report is the interface the batch*/report* functions use to record
// metrics, events, and logs during one collection run. *Report is the
// only implementation in this package.
type report interface {
	// add/done track in-flight goroutines (WaitGroup semantics).
	add()
	done()
	// error records a non-nil error for the run summary.
	error(err error)
	metrics() *poller.Metrics
	events() *poller.Events
	// addCount bumps a named counter (by 1, or by each given count).
	addCount(item, ...int)

	// The report* methods forward to the Datadog statsd client.
	reportGauge(name string, value float64, tags []string) error
	reportCount(name string, value int64, tags []string) error
	reportDistribution(name string, value float64, tags []string) error
	reportTiming(name string, value time.Duration, tags []string) error
	reportEvent(title string, date time.Time, message string, tags []string) error
	reportInfoLog(message string, f ...interface{})
	reportWarnLog(message string, f ...interface{})
	reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error
}
|
||||
|
||||
// add registers one in-flight goroutine with the report's WaitGroup.
func (r *Report) add() {
	r.wg.Add(1)
}

// done marks one previously add()ed goroutine as finished.
func (r *Report) done() {
	r.wg.Done()
}

// metrics returns the metrics gathered for this run.
func (r *Report) metrics() *poller.Metrics {
	return r.Metrics
}

// events returns the events gathered for this run.
func (r *Report) events() *poller.Events {
	return r.Events
}
|
||||
|
||||
/* The following methods are not thread safe. (addCount does hold the
   Counts lock for its map update, but the others assume single-threaded
   use.) */

// item names a per-run counter, e.g. "Event", "IDS", "UAP".
type item string

// addCount increments the named counter. With no explicit counts the
// counter is incremented by one; otherwise each given count is added.
// The Counts mutex is held for the duration of the update.
func (r *Report) addCount(name item, counts ...int) {
	r.Counts.Lock()
	defer r.Counts.Unlock()

	if len(counts) == 0 {
		r.Counts.Val[name]++
	}

	for _, c := range counts {
		r.Counts.Val[name] += c
	}
}
|
||||
|
||||
// error appends err to the report's error list; nil errors are ignored.
func (r *Report) error(err error) {
	if err != nil {
		r.Errors = append(r.Errors, err)
	}
}
|
||||
|
||||
// reportGauge submits a gauge value to Datadog at full sample rate.
func (r *Report) reportGauge(name string, value float64, tags []string) error {
	return r.client.Gauge(name, value, tags, 1.0)
}

// reportCount submits a count value to Datadog at full sample rate.
func (r *Report) reportCount(name string, value int64, tags []string) error {
	return r.client.Count(name, value, tags, 1.0)
}

// reportDistribution submits a distribution sample to Datadog at full sample rate.
func (r *Report) reportDistribution(name string, value float64, tags []string) error {
	return r.client.Distribution(name, value, tags, 1.0)
}

// reportTiming submits a timing value to Datadog at full sample rate.
func (r *Report) reportTiming(name string, value time.Duration, tags []string) error {
	return r.client.Timing(name, value, tags, 1.0)
}
|
||||
|
||||
func (r *Report) reportEvent(title string, date time.Time, message string, tags []string) error {
|
||||
if date.IsZero() {
|
||||
date = time.Now()
|
||||
}
|
||||
return r.client.Event(&statsd.Event{
|
||||
Title: title,
|
||||
Text: message,
|
||||
Timestamp: date,
|
||||
Tags: tags,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Report) reportInfoLog(message string, f ...interface{}) {
|
||||
r.Collector.Logf(message, f)
|
||||
}
|
||||
|
||||
func (r *Report) reportWarnLog(message string, f ...interface{}) {
|
||||
r.Collector.Logf(message, f)
|
||||
}
|
||||
|
||||
// reportServiceCheck submits a Datadog service check with the given
// status and message, timestamped with the current time.
func (r *Report) reportServiceCheck(name string, status statsd.ServiceCheckStatus, message string, tags []string) error {
	return r.client.ServiceCheck(&statsd.ServiceCheck{
		Name:      name,
		Status:    status,
		Timestamp: time.Now(),
		Message:   message,
		Tags:      tags,
	})
}
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// reportSite generates Unifi Sites' datapoints for Datadog.
// One gauge set is emitted per subsystem health record of the site,
// under the "unifi.subsystems.*" namespace. Gauge submission errors
// are ignored (best effort).
func (u *DatadogUnifi) reportSite(r report, s *unifi.Site) {
	metricName := metricNamespace("subsystems")

	for _, h := range s.Health {
		// Identifying tags: site identity plus per-subsystem fields.
		tags := []string{
			tag("name", s.Name),
			tag("site_name", s.SiteName),
			tag("source", s.SourceName),
			tag("desc", s.Desc),
			tag("status", h.Status),
			tag("subsystem", h.Subsystem),
			tag("wan_ip", h.WanIP),
			tag("gw_name", h.GwName),
			tag("lan_ip", h.LanIP),
		}

		// Numeric health fields, flattened to one gauge each.
		data := map[string]float64{
			"num_user":                 h.NumUser.Val,
			"num_guest":                h.NumGuest.Val,
			"num_iot":                  h.NumIot.Val,
			"tx_bytes_r":               h.TxBytesR.Val,
			"rx_bytes_r":               h.RxBytesR.Val,
			"num_ap":                   h.NumAp.Val,
			"num_adopted":              h.NumAdopted.Val,
			"num_disabled":             h.NumDisabled.Val,
			"num_disconnected":         h.NumDisconnected.Val,
			"num_pending":              h.NumPending.Val,
			"num_gw":                   h.NumGw.Val,
			"num_sta":                  h.NumSta.Val,
			"gw_cpu":                   h.GwSystemStats.CPU.Val,
			"gw_mem":                   h.GwSystemStats.Mem.Val,
			"gw_uptime":                h.GwSystemStats.Uptime.Val,
			"latency":                  h.Latency.Val,
			"uptime":                   h.Uptime.Val,
			"drops":                    h.Drops.Val,
			"xput_up":                  h.XputUp.Val,
			"xput_down":                h.XputDown.Val,
			"speedtest_ping":           h.SpeedtestPing.Val,
			"speedtest_lastrun":        h.SpeedtestLastrun.Val,
			"num_sw":                   h.NumSw.Val,
			"remote_user_num_active":   h.RemoteUserNumActive.Val,
			"remote_user_num_inactive": h.RemoteUserNumInactive.Val,
			"remote_user_rx_bytes":     h.RemoteUserRxBytes.Val,
			"remote_user_tx_bytes":     h.RemoteUserTxBytes.Val,
			"remote_user_rx_packets":   h.RemoteUserRxPackets.Val,
			"remote_user_tx_packets":   h.RemoteUserTxPackets.Val,
			"num_new_alarms":           s.NumNewAlarms.Val, // site-level, repeated per subsystem.
		}

		for name, value := range data {
			r.reportGauge(metricName(name), value, tags)
		}
	}
}
|
||||
|
||||
func (u *DatadogUnifi) reportSiteDPI(r report, s *unifi.DPITable) {
|
||||
for _, dpi := range s.ByApp {
|
||||
metricName := metricNamespace("sitedpi")
|
||||
|
||||
tags := []string{
|
||||
tag("category", unifi.DPICats.Get(dpi.Cat)),
|
||||
tag("application", unifi.DPIApps.GetApp(dpi.Cat, dpi.App)),
|
||||
tag("site_name", s.SiteName),
|
||||
tag("source", s.SourceName),
|
||||
}
|
||||
|
||||
r.reportCount(metricName("tx_packets"), dpi.TxPackets, tags)
|
||||
r.reportCount(metricName("rx_packets"), dpi.RxPackets, tags)
|
||||
r.reportCount(metricName("tx_bytes"), dpi.TxBytes, tags)
|
||||
r.reportCount(metricName("rx_bytes"), dpi.RxBytes, tags)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,235 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uapT is used as a name for printed/logged counters of processed
// wireless access points (see batchUAP).
const uapT = item("UAP")
|
||||
|
||||
// batchRogueAP generates metric points for neighboring access points.
// Records with a zero Age are skipped so only recently-seen neighbors
// produce metrics. Gauges are emitted under "unifi.uap_rogue.*".
func (u *DatadogUnifi) batchRogueAP(r report, s *unifi.RogueAP) {
	if s.Age.Val == 0 {
		return // only keep metrics for things that are recent.
	}

	// Identifying tags; empty values are stripped by cleanTags.
	tags := cleanTags(map[string]string{
		"security":   s.Security,
		"oui":        s.Oui,
		"band":       s.Band,
		"mac":        s.Bssid,
		"ap_mac":     s.ApMac,
		"radio":      s.Radio,
		"radio_name": s.RadioName,
		"site_name":  s.SiteName,
		"name":       s.Essid,
		"source":     s.SourceName,
	})

	// Numeric signal/channel readings, one gauge each.
	data := map[string]float64{
		"age":         s.Age.Val,
		"bw":          s.Bw.Val,
		"center_freq": s.CenterFreq.Val,
		"channel":     float64(s.Channel),
		"freq":        s.Freq.Val,
		"noise":       s.Noise.Val,
		"rssi":        s.Rssi.Val,
		"rssi_age":    s.RssiAge.Val,
		"signal":      s.Signal.Val,
	}

	metricName := metricNamespace("uap_rogue")

	reportGaugeForFloat64Map(r, metricName, data, tags)
}
|
||||
|
||||
// batchUAP generates Wireless-Access-Point datapoints for Datadog.
// These points can be passed directly to datadog. Devices that are not
// adopted, or are in "locating" (flashing LED) mode, are skipped.
func (u *DatadogUnifi) batchUAP(r report, s *unifi.UAP) {
	if !s.Adopted.Val || s.Locating.Val {
		return
	}

	// Device identity tags; empty values stripped by cleanTags.
	tags := cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	// Merge accumulated AP stats with the shared system stats, then add
	// the device-level counters.
	data := CombineFloat64(u.processUAPstats(s.Stat.Ap), u.batchSysStats(s.SysStats, s.SystemStats))
	data["bytes"] = s.Bytes.Val
	data["last_seen"] = s.LastSeen.Val
	data["rx_bytes"] = s.RxBytes.Val
	data["tx_bytes"] = s.TxBytes.Val
	data["uptime"] = s.Uptime.Val
	data["user_num_sta"] = s.UserNumSta.Val
	data["guest_num_sta"] = s.GuestNumSta.Val
	data["num_sta"] = s.NumSta.Val

	// Count this AP for the end-of-run summary.
	r.addCount(uapT)

	metricName := metricNamespace("uap")

	reportGaugeForFloat64Map(r, metricName, data, tags)

	// Per-radio (VAP) and per-port details are reported separately.
	u.processVAPTable(r, tags, s.VapTable)
	u.batchPortTable(r, tags, s.PortTable)
}
|
||||
|
||||
// processUAPstats flattens an access point's accumulated user/guest/total
// rx/tx statistics into a metric map. A nil Ap (no stats available)
// yields an empty map so callers can merge unconditionally.
func (u *DatadogUnifi) processUAPstats(ap *unifi.Ap) map[string]float64 {
	if ap == nil {
		return map[string]float64{}
	}

	// Accumulative Statistics.
	return map[string]float64{
		"stat_user-rx_packets":  ap.UserRxPackets.Val,
		"stat_guest-rx_packets": ap.GuestRxPackets.Val,
		"stat_rx_packets":       ap.RxPackets.Val,
		"stat_user-rx_bytes":    ap.UserRxBytes.Val,
		"stat_guest-rx_bytes":   ap.GuestRxBytes.Val,
		"stat_rx_bytes":         ap.RxBytes.Val,
		"stat_user-rx_errors":   ap.UserRxErrors.Val,
		"stat_guest-rx_errors":  ap.GuestRxErrors.Val,
		"stat_rx_errors":        ap.RxErrors.Val,
		"stat_user-rx_dropped":  ap.UserRxDropped.Val,
		"stat_guest-rx_dropped": ap.GuestRxDropped.Val,
		"stat_rx_dropped":       ap.RxDropped.Val,
		"stat_user-rx_crypts":   ap.UserRxCrypts.Val,
		"stat_guest-rx_crypts":  ap.GuestRxCrypts.Val,
		"stat_rx_crypts":        ap.RxCrypts.Val,
		"stat_user-rx_frags":    ap.UserRxFrags.Val,
		"stat_guest-rx_frags":   ap.GuestRxFrags.Val,
		"stat_rx_frags":         ap.RxFrags.Val,
		"stat_user-tx_packets":  ap.UserTxPackets.Val,
		"stat_guest-tx_packets": ap.GuestTxPackets.Val,
		"stat_tx_packets":       ap.TxPackets.Val,
		"stat_user-tx_bytes":    ap.UserTxBytes.Val,
		"stat_guest-tx_bytes":   ap.GuestTxBytes.Val,
		"stat_tx_bytes":         ap.TxBytes.Val,
		"stat_user-tx_errors":   ap.UserTxErrors.Val,
		"stat_guest-tx_errors":  ap.GuestTxErrors.Val,
		"stat_tx_errors":        ap.TxErrors.Val,
		"stat_user-tx_dropped":  ap.UserTxDropped.Val,
		"stat_guest-tx_dropped": ap.GuestTxDropped.Val,
		"stat_tx_dropped":       ap.TxDropped.Val,
		"stat_user-tx_retries":  ap.UserTxRetries.Val,
		// NOTE(review): user and guest tx retries are present but there is
		// no combined "stat_tx_retries" entry — confirm whether that was
		// intentional.
		"stat_guest-tx_retries": ap.GuestTxRetries.Val,
	}
}
|
||||
|
||||
// processVAPTable creates points for Wifi Radios. This works with several
// types of UAP-capable devices. Gauges go under "unifi.uap_vaps.*", with
// identity tags partly inherited from the parent device tag map t.
func (u *DatadogUnifi) processVAPTable(r report, t map[string]string, vt unifi.VapTable) { // nolint: funlen
	for _, s := range vt {
		tags := map[string]string{
			"device_name": t["name"],
			"site_name":   t["site_name"],
			"source":      t["source"],
			"ap_mac":      s.ApMac,
			"bssid":       s.Bssid,
			"id":          s.ID,
			"name":        s.Name,
			"radio_name":  s.RadioName,
			"radio":       s.Radio,
			"essid":       s.Essid,
			"site_id":     s.SiteID,
			"usage":       s.Usage,
			"state":       s.State,
			"is_guest":    s.IsGuest.Txt,
		}
		data := map[string]float64{
			"ccq":                       float64(s.Ccq),
			"mac_filter_rejections":     float64(s.MacFilterRejections),
			"num_satisfaction_sta":      s.NumSatisfactionSta.Val,
			"avg_client_signal":         s.AvgClientSignal.Val,
			"satisfaction":              s.Satisfaction.Val,
			"satisfaction_now":          s.SatisfactionNow.Val,
			"num_sta":                   float64(s.NumSta),
			"channel":                   s.Channel.Val,
			"rx_bytes":                  s.RxBytes.Val,
			"rx_crypts":                 s.RxCrypts.Val,
			"rx_dropped":                s.RxDropped.Val,
			"rx_errors":                 s.RxErrors.Val,
			"rx_frags":                  s.RxFrags.Val,
			"rx_nwids":                  s.RxNwids.Val,
			"rx_packets":                s.RxPackets.Val,
			"tx_bytes":                  s.TxBytes.Val,
			"tx_dropped":                s.TxDropped.Val,
			"tx_errors":                 s.TxErrors.Val,
			"tx_packets":                s.TxPackets.Val,
			"tx_power":                  s.TxPower.Val,
			"tx_retries":                s.TxRetries.Val,
			"tx_combined_retries":       s.TxCombinedRetries.Val,
			"tx_data_mpdu_bytes":        s.TxDataMpduBytes.Val,
			"tx_rts_retries":            s.TxRtsRetries.Val,
			"tx_success":                s.TxSuccess.Val,
			"tx_total":                  s.TxTotal.Val,
			"tx_tcp_goodbytes":          s.TxTCPStats.Goodbytes.Val,
			"tx_tcp_lat_avg":            s.TxTCPStats.LatAvg.Val,
			"tx_tcp_lat_max":            s.TxTCPStats.LatMax.Val,
			"tx_tcp_lat_min":            s.TxTCPStats.LatMin.Val,
			"rx_tcp_goodbytes":          s.RxTCPStats.Goodbytes.Val,
			"rx_tcp_lat_avg":            s.RxTCPStats.LatAvg.Val,
			"rx_tcp_lat_max":            s.RxTCPStats.LatMax.Val,
			"rx_tcp_lat_min":            s.RxTCPStats.LatMin.Val,
			"wifi_tx_latency_mov_avg":   s.WifiTxLatencyMov.Avg.Val,
			"wifi_tx_latency_mov_max":   s.WifiTxLatencyMov.Max.Val,
			"wifi_tx_latency_mov_min":   s.WifiTxLatencyMov.Min.Val,
			"wifi_tx_latency_mov_total": s.WifiTxLatencyMov.Total.Val,
			// NOTE(review): "cuont" looks like a typo for "count", but fixing
			// it would rename the emitted metric — confirm with dashboard
			// owners before changing.
			"wifi_tx_latency_mov_cuont": s.WifiTxLatencyMov.TotalCount.Val,
		}

		metricName := metricNamespace("uap_vaps")

		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
||||
func (u *DatadogUnifi) processRadTable(r report, t map[string]string, rt unifi.RadioTable, rts unifi.RadioTableStats) {
|
||||
for _, p := range rt {
|
||||
tags := map[string]string{
|
||||
"device_name": t["name"],
|
||||
"site_name": t["site_name"],
|
||||
"source": t["source"],
|
||||
"channel": p.Channel.Txt,
|
||||
"radio": p.Radio,
|
||||
"ht": p.Ht.Txt,
|
||||
}
|
||||
data := map[string]float64{
|
||||
"current_antenna_gain": p.CurrentAntennaGain.Val,
|
||||
"max_txpower": p.MaxTxpower.Val,
|
||||
"min_txpower": p.MinTxpower.Val,
|
||||
"nss": p.Nss.Val,
|
||||
"radio_caps": p.RadioCaps.Val,
|
||||
}
|
||||
|
||||
for _, t := range rts {
|
||||
if t.Name == p.Name {
|
||||
data["ast_be_xmit"] = t.AstBeXmit.Val
|
||||
data["channel"] = t.Channel.Val
|
||||
data["cu_self_rx"] = t.CuSelfRx.Val
|
||||
data["cu_self_tx"] = t.CuSelfTx.Val
|
||||
data["cu_total"] = t.CuTotal.Val
|
||||
data["ext_channel"] = t.Extchannel.Val
|
||||
data["gain"] = t.Gain.Val
|
||||
data["guest_num_sta"] = t.GuestNumSta.Val
|
||||
data["num_sta"] = t.NumSta.Val
|
||||
data["tx_packets"] = t.TxPackets.Val
|
||||
data["tx_power"] = t.TxPower.Val
|
||||
data["tx_retries"] = t.TxRetries.Val
|
||||
data["user_num_sta"] = t.UserNumSta.Val
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
metricName := metricNamespace("uap_radios")
|
||||
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,196 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// udmT is used as a name for printed/logged counters of processed
// UniFi Dream Machine devices (see batchUDM).
const udmT = item("UDM")
|
||||
|
||||
// Combine merges any number of maps into a single new map. Later maps
// win on key collisions, so argument order matters — this will delete
// things if not used with caution.
func Combine(in ...map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})

	for _, m := range in {
		for k, v := range m {
			out[k] = v
		}
	}

	return out
}
|
||||
|
||||
// CombineFloat64 merges any number of float64 maps into a single new
// map. Later maps win on key collisions, so argument order matters —
// this will delete things if not used with caution.
func CombineFloat64(in ...map[string]float64) map[string]float64 {
	out := make(map[string]float64)

	for _, m := range in {
		for k, v := range m {
			out[k] = v
		}
	}

	return out
}
|
||||
|
||||
// batchSysStats is used by all device types. It flattens the shared
// load/memory/CPU statistics into a metric map and adds one
// "temp_<name>" gauge per parseable temperature reading.
func (u *DatadogUnifi) batchSysStats(s unifi.SysStats, ss unifi.SystemStats) map[string]float64 {
	m := map[string]float64{
		"loadavg_1":     s.Loadavg1.Val,
		"loadavg_5":     s.Loadavg5.Val,
		"loadavg_15":    s.Loadavg15.Val,
		"mem_used":      s.MemUsed.Val,
		"mem_buffer":    s.MemBuffer.Val,
		"mem_total":     s.MemTotal.Val,
		"cpu":           ss.CPU.Val,
		"mem":           ss.Mem.Val,
		"system_uptime": ss.Uptime.Val,
	}

	for k, v := range ss.Temps {
		// The value presumably looks like "<number> <unit>"; take the
		// leading number. A failed parse yields 0 and is skipped below.
		temp, _ := strconv.Atoi(strings.Split(v, " ")[0])
		// Normalize the sensor name: spaces become underscores, parens
		// are removed.
		k = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(k, " ", "_"), ")", ""), "(", "")

		// Skip unparseable readings (temp == 0 also drops a literal 0°).
		if temp != 0 && k != "" {
			m["temp_"+strings.ToLower(k)] = float64(temp)
		}
	}

	return m
}
|
||||
|
||||
func (u *DatadogUnifi) batchUDMtemps(temps []unifi.Temperature) map[string]float64 {
|
||||
output := make(map[string]float64)
|
||||
|
||||
for _, t := range temps {
|
||||
output["temp_"+t.Name] = t.Value
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// batchUDMstorage converts storage device readings into a metric map:
// per device, its size, used bytes, and a derived used-percentage.
func (u *DatadogUnifi) batchUDMstorage(storage []*unifi.Storage) map[string]float64 {
	output := make(map[string]float64)

	for _, t := range storage {
		output["storage_"+t.Name+"_size"] = t.Size.Val
		output["storage_"+t.Name+"_used"] = t.Used.Val

		// NOTE(review): a completely full disk (Used == Size) reports 0%
		// here because of the strict < comparison — confirm whether that
		// is intentional before changing.
		if t.Size.Val != 0 && t.Used.Val != 0 && t.Used.Val < t.Size.Val {
			output["storage_"+t.Name+"_pct"] = t.Used.Val / t.Size.Val * 100 //nolint:gomnd
		} else {
			output["storage_"+t.Name+"_pct"] = 0
		}
	}

	return output
}
|
||||
|
||||
// batchUDM generates Unifi Gateway datapoints for Datadog.
// These points can be passed directly to datadog.
// A UDM combines gateway (usg) and switch (usw) roles, and on non-pro
// models an access point (uap) as well, so this reports up to three
// metric families under three separate tag sets.
func (u *DatadogUnifi) batchUDM(r report, s *unifi.UDM) { // nolint: funlen
	// Ignore devices that are not adopted or are currently being located.
	if !s.Adopted.Val || s.Locating.Val {
		return
	}

	// Gateway (usg) portion of the UDM.
	tags := cleanTags(map[string]string{
		"source":        s.SourceName,
		"mac":           s.Mac,
		"site_name":     s.SiteName,
		"name":          s.Name,
		"version":       s.Version,
		"model":         s.Model,
		"serial":        s.Serial,
		"type":          s.Type,
		"ip":            s.IP,
		"license_state": s.LicenseState,
	})
	data := CombineFloat64(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]float64{
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"guest_num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user_num_sta":  s.UserNumSta.Val,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(udmT)
	metricName := metricNamespace("usg")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Switch (usw) portion of the UDM: new tag set without license_state.
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = CombineFloat64(
		u.batchUSWstat(s.Stat.Sw),
		map[string]float64{
			"guest_num_sta": s.GuestNumSta.Val,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	metricName = metricNamespace("usw")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchPortTable(r, tags, s.PortTable) // udm has a usw in it.

	// No AP stats means this is a UDM-Pro (external APs only); stop here.
	if s.Stat.Ap == nil {
		return // we're done now. the following code process UDM (non-pro) UAP data.
	}

	// Access point (uap) portion of the non-pro UDM.
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = u.processUAPstats(s.Stat.Ap)
	data["bytes"] = s.Bytes.Val
	data["last_seen"] = s.LastSeen.Val
	data["rx_bytes"] = s.RxBytes.Val
	data["tx_bytes"] = s.TxBytes.Val
	data["uptime"] = s.Uptime.Val
	data["state"] = s.State.Val
	data["user_num_sta"] = s.UserNumSta.Val
	data["guest_num_sta"] = s.GuestNumSta.Val
	data["num_sta"] = s.NumSta.Val

	metricName = metricNamespace("uap")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.processRadTable(r, tags, *s.RadioTable, *s.RadioTableStats)
	u.processVAPTable(r, tags, *s.VapTable)
}
|
||||
|
|
@ -0,0 +1,155 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// usgT is used as a name for printed/logged counters.
const usgT = item("USG")
|
||||
|
||||
// batchUSG generates Unifi Gateway datapoints for Datadog.
|
||||
// These points can be passed directly to datadog.
|
||||
func (u *DatadogUnifi) batchUSG(r report, s *unifi.USG) {
|
||||
if !s.Adopted.Val || s.Locating.Val {
|
||||
return
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"mac": s.Mac,
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
"name": s.Name,
|
||||
"version": s.Version,
|
||||
"model": s.Model,
|
||||
"serial": s.Serial,
|
||||
"type": s.Type,
|
||||
"ip": s.IP,
|
||||
"license_state": s.LicenseState,
|
||||
}
|
||||
data := CombineFloat64(
|
||||
u.batchUDMtemps(s.Temperatures),
|
||||
u.batchSysStats(s.SysStats, s.SystemStats),
|
||||
u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
|
||||
map[string]float64{
|
||||
"bytes": s.Bytes.Val,
|
||||
"last_seen": s.LastSeen.Val,
|
||||
"guest_num_sta": s.GuestNumSta.Val,
|
||||
"rx_bytes": s.RxBytes.Val,
|
||||
"tx_bytes": s.TxBytes.Val,
|
||||
"uptime": s.Uptime.Val,
|
||||
"state": s.State.Val,
|
||||
"user_num_sta": s.UserNumSta.Val,
|
||||
"num_desktop": s.NumDesktop.Val,
|
||||
"num_handheld": s.NumHandheld.Val,
|
||||
"num_mobile": s.NumMobile.Val,
|
||||
},
|
||||
)
|
||||
|
||||
r.addCount(usgT)
|
||||
|
||||
metricName := metricNamespace("usg")
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
|
||||
u.batchNetTable(r, tags, s.NetworkTable)
|
||||
u.batchUSGwans(r, tags, s.Wan1, s.Wan2)
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) batchUSGstats(ss unifi.SpeedtestStatus, gw *unifi.Gw, ul unifi.Uplink) map[string]float64 {
|
||||
if gw == nil {
|
||||
return map[string]float64{}
|
||||
}
|
||||
|
||||
return map[string]float64{
|
||||
"uplink_latency": ul.Latency.Val,
|
||||
"uplink_speed": ul.Speed.Val,
|
||||
"speedtest_status_latency": ss.Latency.Val,
|
||||
"speedtest_status_runtime": ss.Runtime.Val,
|
||||
"speedtest_status_rundate": ss.Rundate.Val,
|
||||
"speedtest_status_ping": ss.StatusPing.Val,
|
||||
"speedtest_status_xput_download": ss.XputDownload.Val,
|
||||
"speedtest_status_xput_upload": ss.XputUpload.Val,
|
||||
"lan_rx_bytes": gw.LanRxBytes.Val,
|
||||
"lan_rx_packets": gw.LanRxPackets.Val,
|
||||
"lan_tx_bytes": gw.LanTxBytes.Val,
|
||||
"lan_tx_packets": gw.LanTxPackets.Val,
|
||||
"lan_rx_dropped": gw.LanRxDropped.Val,
|
||||
}
|
||||
}
|
||||
|
||||
// batchUSGwans reports per-interface metrics for each WAN port on a
// gateway. Interfaces that are down are skipped entirely.
func (u *DatadogUnifi) batchUSGwans(r report, tags map[string]string, wans ...unifi.Wan) {
	for _, wan := range wans {
		if !wan.Up.Val {
			continue
		}

		// Per-WAN tag set; deliberately shadows the device-level tags,
		// carrying over only name/site/source.
		tags := cleanTags(map[string]string{
			"device_name": tags["name"],
			"site_name":   tags["site_name"],
			"source":      tags["source"],
			"ip":          wan.IP,
			"purpose":     wan.Name,
			"mac":         wan.Mac,
			"ifname":      wan.Ifname,
			"type":        wan.Type,
			"up":          wan.Up.Txt,
			"enabled":     wan.Enable.Txt,
			"gateway":     wan.Gateway,
		})

		// Encode the boolean duplex flag as 0/1 for the gauge.
		fullDuplex := 0.0
		if wan.FullDuplex.Val {
			fullDuplex = 1.0
		}
		data := map[string]float64{
			"bytes_r":      wan.BytesR.Val,
			"full_duplex":  fullDuplex,
			"max_speed":    wan.MaxSpeed.Val,
			"rx_bytes":     wan.RxBytes.Val,
			"rx_bytes_r":   wan.RxBytesR.Val,
			"rx_dropped":   wan.RxDropped.Val,
			"rx_errors":    wan.RxErrors.Val,
			"rx_broadcast": wan.RxBroadcast.Val,
			"rx_multicast": wan.RxMulticast.Val,
			"rx_packets":   wan.RxPackets.Val,
			"speed":        wan.Speed.Val,
			"tx_bytes":     wan.TxBytes.Val,
			"tx_bytes_r":   wan.TxBytesR.Val,
			"tx_dropped":   wan.TxDropped.Val,
			"tx_errors":    wan.TxErrors.Val,
			"tx_packets":   wan.TxPackets.Val,
			"tx_broadcast": wan.TxBroadcast.Val,
			"tx_multicast": wan.TxMulticast.Val,
		}

		metricName := metricNamespace("usg.wan_ports")
		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
||||
func (u *DatadogUnifi) batchNetTable(r report, tags map[string]string, nt unifi.NetworkTable) {
|
||||
for _, p := range nt {
|
||||
tags := cleanTags(map[string]string{
|
||||
"device_name": tags["name"],
|
||||
"site_name": tags["site_name"],
|
||||
"source": tags["source"],
|
||||
"up": p.Up.Txt,
|
||||
"enabled": p.Enabled.Txt,
|
||||
"ip": p.IP,
|
||||
"mac": p.Mac,
|
||||
"name": p.Name,
|
||||
"domain_name": p.DomainName,
|
||||
"purpose": p.Purpose,
|
||||
"is_guest": p.IsGuest.Txt,
|
||||
})
|
||||
data := map[string]float64{
|
||||
"num_sta": p.NumSta.Val,
|
||||
"rx_bytes": p.RxBytes.Val,
|
||||
"rx_packets": p.RxPackets.Val,
|
||||
"tx_bytes": p.TxBytes.Val,
|
||||
"tx_packets": p.TxPackets.Val,
|
||||
}
|
||||
|
||||
metricName := metricNamespace("usg.networks")
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uswT is used as a name for printed/logged counters.
const uswT = item("USW")
|
||||
|
||||
// batchUSW generates Unifi Switch datapoints for Datadog.
|
||||
// These points can be passed directly to datadog.
|
||||
func (u *DatadogUnifi) batchUSW(r report, s *unifi.USW) {
|
||||
if !s.Adopted.Val || s.Locating.Val {
|
||||
return
|
||||
}
|
||||
|
||||
tags := cleanTags(map[string]string{
|
||||
"mac": s.Mac,
|
||||
"site_name": s.SiteName,
|
||||
"source": s.SourceName,
|
||||
"name": s.Name,
|
||||
"version": s.Version,
|
||||
"model": s.Model,
|
||||
"serial": s.Serial,
|
||||
"type": s.Type,
|
||||
"ip": s.IP,
|
||||
})
|
||||
data := CombineFloat64(
|
||||
u.batchUSWstat(s.Stat.Sw),
|
||||
u.batchSysStats(s.SysStats, s.SystemStats),
|
||||
map[string]float64{
|
||||
"guest_num_sta": s.GuestNumSta.Val,
|
||||
"bytes": s.Bytes.Val,
|
||||
"fan_level": s.FanLevel.Val,
|
||||
"general_temperature": s.GeneralTemperature.Val,
|
||||
"last_seen": s.LastSeen.Val,
|
||||
"rx_bytes": s.RxBytes.Val,
|
||||
"tx_bytes": s.TxBytes.Val,
|
||||
"uptime": s.Uptime.Val,
|
||||
"state": s.State.Val,
|
||||
"user_num_sta": s.UserNumSta.Val,
|
||||
})
|
||||
|
||||
r.addCount(uswT)
|
||||
metricName := metricNamespace("usw")
|
||||
reportGaugeForFloat64Map(r, metricName, data, tags)
|
||||
|
||||
u.batchPortTable(r, tags, s.PortTable)
|
||||
}
|
||||
|
||||
func (u *DatadogUnifi) batchUSWstat(sw *unifi.Sw) map[string]float64 {
|
||||
if sw == nil {
|
||||
return map[string]float64{}
|
||||
}
|
||||
|
||||
return map[string]float64{
|
||||
"stat_bytes": sw.Bytes.Val,
|
||||
"stat_rx_bytes": sw.RxBytes.Val,
|
||||
"stat_rx_crypts": sw.RxCrypts.Val,
|
||||
"stat_rx_dropped": sw.RxDropped.Val,
|
||||
"stat_rx_errors": sw.RxErrors.Val,
|
||||
"stat_rx_frags": sw.RxFrags.Val,
|
||||
"stat_rx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_bytes": sw.TxBytes.Val,
|
||||
"stat_tx_dropped": sw.TxDropped.Val,
|
||||
"stat_tx_errors": sw.TxErrors.Val,
|
||||
"stat_tx_packets": sw.TxPackets.Val,
|
||||
"stat_tx_retries": sw.TxRetries.Val,
|
||||
}
|
||||
}
|
||||
|
||||
// batchPortTable reports per-port metrics for a switch (or the switch
// portion of a gateway). Down or disabled ports are skipped unless the
// DeadPorts option is set.
//nolint:funlen
func (u *DatadogUnifi) batchPortTable(r report, t map[string]string, pt []unifi.Port) {
	for _, p := range pt {
		if !u.DeadPorts && (!p.Up.Val || !p.Enable.Val) {
			continue // only record UP ports.
		}

		// Per-port tag set; carries name/site/source/type from the device
		// tags and adds port identity, PoE, flow-control, and SFP info.
		tags := cleanTags(map[string]string{
			"site_name":      t["site_name"],
			"device_name":    t["name"],
			"source":         t["source"],
			"type":           t["type"],
			"name":           p.Name,
			"poe_mode":       p.PoeMode,
			"port_poe":       p.PortPoe.Txt,
			"port_idx":       p.PortIdx.Txt,
			"port_id":        t["name"] + " Port " + p.PortIdx.Txt,
			"poe_enable":     p.PoeEnable.Txt,
			"flow_ctrl_rx":   p.FlowctrlRx.Txt,
			"flow_ctrl_tx":   p.FlowctrlTx.Txt,
			"media":          p.Media,
			"has_sfp":        p.SFPFound.Txt,
			"sfp_compliance": p.SFPCompliance,
			"sfp_serial":     p.SFPSerial,
			"sfp_vendor":     p.SFPVendor,
			"sfp_part":       p.SFPPart,
		})
		data := map[string]float64{
			"bytes_r":       p.BytesR.Val,
			"rx_broadcast":  p.RxBroadcast.Val,
			"rx_bytes":      p.RxBytes.Val,
			"rx_bytes_r":    p.RxBytesR.Val,
			"rx_dropped":    p.RxDropped.Val,
			"rx_errors":     p.RxErrors.Val,
			"rx_multicast":  p.RxMulticast.Val,
			"rx_packets":    p.RxPackets.Val,
			"speed":         p.Speed.Val,
			"stp_path_cost": p.StpPathcost.Val,
			"tx_broadcast":  p.TxBroadcast.Val,
			"tx_bytes":      p.TxBytes.Val,
			"tx_bytes_r":    p.TxBytesR.Val,
			"tx_dropped":    p.TxDropped.Val,
			"tx_errors":     p.TxErrors.Val,
			"tx_multicast":  p.TxMulticast.Val,
			"tx_packets":    p.TxPackets.Val,
		}

		// PoE electrical metrics only apply to PoE-capable, enabled ports.
		if p.PoeEnable.Val && p.PortPoe.Val {
			data["poe_current"] = p.PoeCurrent.Val
			data["poe_power"] = p.PoePower.Val
			data["poe_voltage"] = p.PoeVoltage.Val
		}

		// SFP module diagnostics only apply when a module is present.
		if p.SFPFound.Val {
			data["sfp_current"] = p.SFPCurrent.Val
			data["sfp_voltage"] = p.SFPVoltage.Val
			data["sfp_temperature"] = p.SFPTemperature.Val
			data["sfp_tx_power"] = p.SFPTxpower.Val
			data["sfp_rx_power"] = p.SFPRxpower.Val
		}

		metricName := metricNamespace("usw.ports")
		reportGaugeForFloat64Map(r, metricName, data, tags)
	}
}
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
package datadogunifi
|
||||
|
||||
import (
|
||||
"github.com/unpoller/unifi"
|
||||
)
|
||||
|
||||
// uxgT is used as a name for printed/logged counters.
const uxgT = item("UXG")
|
||||
|
||||
// batchUXG generates 10Gb Unifi Gateway datapoints for Datadog.
// These points can be passed directly to datadog.
// Like the UDM, a UXG combines gateway (usg) and switch (usw) roles, so
// this reports two metric families under two separate tag sets.
func (u *DatadogUnifi) batchUXG(r report, s *unifi.UXG) { // nolint: funlen
	// Ignore devices that are not adopted or are currently being located.
	if !s.Adopted.Val || s.Locating.Val {
		return
	}

	// Gateway (usg) portion of the UXG.
	tags := cleanTags(map[string]string{
		"source":        s.SourceName,
		"mac":           s.Mac,
		"site_name":     s.SiteName,
		"name":          s.Name,
		"version":       s.Version,
		"model":         s.Model,
		"serial":        s.Serial,
		"type":          s.Type,
		"ip":            s.IP,
		"license_state": s.LicenseState,
	})
	data := CombineFloat64(
		u.batchUDMstorage(s.Storage),
		u.batchUDMtemps(s.Temperatures),
		u.batchUSGstats(s.SpeedtestStatus, s.Stat.Gw, s.Uplink),
		u.batchSysStats(s.SysStats, s.SystemStats),
		map[string]float64{
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"guest_num_sta": s.GuestNumSta.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
			"state":         s.State.Val,
			"user_num_sta":  s.UserNumSta.Val,
			"num_desktop":   s.NumDesktop.Val,
			"num_handheld":  s.NumHandheld.Val,
			"num_mobile":    s.NumMobile.Val,
		},
	)

	r.addCount(uxgT)

	metricName := metricNamespace("usg")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchNetTable(r, tags, s.NetworkTable)
	u.batchUSGwans(r, tags, s.Wan1, s.Wan2)

	// Switch (usw) portion of the UXG: new tag set without license_state.
	tags = cleanTags(map[string]string{
		"mac":       s.Mac,
		"site_name": s.SiteName,
		"source":    s.SourceName,
		"name":      s.Name,
		"version":   s.Version,
		"model":     s.Model,
		"serial":    s.Serial,
		"type":      s.Type,
		"ip":        s.IP,
	})
	data = CombineFloat64(
		u.batchUSWstat(s.Stat.Sw),
		map[string]float64{
			"guest_num_sta": s.GuestNumSta.Val,
			"bytes":         s.Bytes.Val,
			"last_seen":     s.LastSeen.Val,
			"rx_bytes":      s.RxBytes.Val,
			"tx_bytes":      s.TxBytes.Val,
			"uptime":        s.Uptime.Val,
		})

	metricName = metricNamespace("usw")
	reportGaugeForFloat64Map(r, metricName, data, tags)

	u.batchPortTable(r, tags, s.PortTable) // like the udm, a uxg has a usw in it.
}
|
||||
Loading…
Reference in New Issue