package metrics
|
|
|
|
import (
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
|
"lostak.dev/pve-exporter/proxmox"
|
|
)
|
|
|
|
// PveNodeDiskCollector collects per-disk metrics (SMART health state,
// wearout percentage, and raw size) for every node in the PVE cluster.
// NOTE(review): original comment said "cluster state collector" — copy-paste
// error; this type only exports node disk gauges.
type PveNodeDiskCollector struct {
	apiClient *proxmox.PveApiClient // PVE API client instance.
	registry  *TTLRegistry          // TTL metrics registry.

	healthy   *TTLGaugeVec // Node disk SMART passed state prometheus gauge.
	wearout   *TTLGaugeVec // Node disk wearout % prometheus gauge.
	sizeBytes *TTLGaugeVec // Node disk size in bytes prometheus gauge.
}
|
|
|
|
// Create new instance of PVE cluster state collector.
|
|
func NewPveNodeDiskCollector(apiClient *proxmox.PveApiClient, registry *TTLRegistry) *PveNodeDiskCollector {
|
|
c := PveNodeDiskCollector{apiClient: apiClient}
|
|
c.registry = registry
|
|
|
|
// Node disk healthy state.
|
|
c.healthy = NewTTLGaugeVec(
|
|
prometheus.GaugeOpts{
|
|
Name: "pve_node_disk_healthy",
|
|
Help: "Node disk healthy state.",
|
|
},
|
|
[]string{"cluster", "node", "wwn", "type", "model", "serial", "vendor", "used", "osd_id"},
|
|
1*time.Minute,
|
|
)
|
|
c.registry.Register(c.healthy)
|
|
|
|
// Node disk wearout.
|
|
c.wearout = NewTTLGaugeVec(
|
|
prometheus.GaugeOpts{
|
|
Name: "pve_node_disk_wearout",
|
|
Help: "Node disk wearout percent.",
|
|
},
|
|
[]string{"cluster", "node", "wwn", "type", "model", "serial", "vendor", "used", "osd_id"},
|
|
1*time.Minute,
|
|
)
|
|
c.registry.Register(c.healthy)
|
|
|
|
// Node disk size in bytes.
|
|
c.sizeBytes = NewTTLGaugeVec(
|
|
prometheus.GaugeOpts{
|
|
Name: "pve_node_disk_size_bytes",
|
|
Help: "Node disk size in bytes.",
|
|
},
|
|
[]string{"cluster", "node", "wwn", "type", "model", "serial", "vendor", "used", "osd_id"},
|
|
1*time.Minute,
|
|
)
|
|
c.registry.Register(c.sizeBytes)
|
|
|
|
return &c
|
|
}
|
|
|
|
// PveMetricsCollector interface implementation.
|
|
func (c *PveNodeDiskCollector) CollectMetrics() error {
|
|
cluster, err := c.apiClient.GetClusterStatus()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, node := range cluster.NodeStatuses {
|
|
disks, err := c.apiClient.GetNodeDisksList(node.Name)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, disk := range *disks {
|
|
labels := prometheus.Labels{
|
|
"cluster": cluster.GetClusterName(),
|
|
"node": node.Name,
|
|
"wwn": disk.WWN,
|
|
"type": disk.Type,
|
|
"model": disk.Model,
|
|
"serial": disk.Serial,
|
|
"vendor": strings.TrimSpace(disk.Vendor),
|
|
"used": disk.Used,
|
|
"osd_id": strconv.Itoa(disk.OSDID),
|
|
}
|
|
|
|
// Disk healthy state.
|
|
c.healthy.With(labels).Set(disk.GetSmartPassedState())
|
|
|
|
// Disk wearout %.
|
|
wearout, ok := disk.WearOut.(float64)
|
|
if ok {
|
|
c.wearout.With(labels).Set(wearout)
|
|
}
|
|
|
|
// Disk size in bytes.
|
|
c.sizeBytes.With(labels).Set(float64(disk.Size))
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// PveMetricsCollector interface implementation.
|
|
func (c *PveNodeDiskCollector) GetName() string {
|
|
return "Node Disks"
|
|
}
|