Add Reset() calls before collecting new metrics to prevent stale (dangling) metric series from previous scrapes
This commit is contained in:
@@ -46,6 +46,9 @@ func (c *PveClusterStateCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.nodes.Reset()
|
||||
c.quorate.Reset()
|
||||
|
||||
l := prometheus.Labels{"cluster": cluster.Name}
|
||||
c.nodes.With(l).Set(float64(cluster.Nodes))
|
||||
c.quorate.With(l).Set(float64(cluster.Quorate))
|
||||
|
||||
@@ -161,6 +161,20 @@ func (c *PveContainerCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.state.Reset()
|
||||
c.cpu.Reset()
|
||||
c.memBytes.Reset()
|
||||
c.diskMax.Reset()
|
||||
c.uptime.Reset()
|
||||
c.cpuUsage.Reset()
|
||||
c.memBytesUsed.Reset()
|
||||
c.netReceive.Reset()
|
||||
c.netTransmit.Reset()
|
||||
c.diskRead.Reset()
|
||||
c.diskWrite.Reset()
|
||||
c.disk.Reset()
|
||||
c.swap.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
containers, err := c.apiClient.GetNodeContainerList(node.Name)
|
||||
if err != nil {
|
||||
|
||||
@@ -59,6 +59,10 @@ func (c *PveNodeDiskCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.healthy.Reset()
|
||||
c.wearout.Reset()
|
||||
c.sizeBytes.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
disks, err := c.apiClient.GetNodeDisksList(node.Name)
|
||||
if err != nil {
|
||||
|
||||
@@ -41,6 +41,8 @@ func (c *PveSdnCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.state.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
sdns := resources.FindNodeSDN(node.Name)
|
||||
if len(*sdns) > 0 {
|
||||
|
||||
@@ -229,6 +229,27 @@ func (c *PveNodeStatusCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.state.Reset()
|
||||
c.uptime.Reset()
|
||||
c.cpus.Reset()
|
||||
c.cpuUsage.Reset()
|
||||
c.memBytes.Reset()
|
||||
c.memBytesUsed.Reset()
|
||||
c.memBytesFree.Reset()
|
||||
c.ksmShared.Reset()
|
||||
c.cgroupMode.Reset()
|
||||
c.load1.Reset()
|
||||
c.load5.Reset()
|
||||
c.load15.Reset()
|
||||
c.fSFree.Reset()
|
||||
c.fSUsed.Reset()
|
||||
c.fSTotal.Reset()
|
||||
c.fSAvail.Reset()
|
||||
c.cpuInfo.Reset()
|
||||
c.systemInfo.Reset()
|
||||
c.time.Reset()
|
||||
c.localTime.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
labels := prometheus.Labels{
|
||||
"cluster": cluster.GetClusterName(),
|
||||
|
||||
@@ -68,6 +68,11 @@ func (c *PveStorageCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.state.Reset()
|
||||
c.total.Reset()
|
||||
c.avail.Reset()
|
||||
c.used.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
storages, err := c.apiClient.GetNodeStorages(node.Name)
|
||||
if err != nil {
|
||||
|
||||
@@ -78,6 +78,12 @@ func (c *PveSubscriptionCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.info.Reset()
|
||||
c.status.Reset()
|
||||
c.nextDueDate.Reset()
|
||||
c.regDate.Reset()
|
||||
c.sockets.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
labels := prometheus.Labels{
|
||||
"cluster": cluster.GetClusterName(),
|
||||
|
||||
@@ -228,6 +228,24 @@ func (c *PveVirtualMachineCollector) CollectMetrics() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.state.Reset()
|
||||
c.cpu.Reset()
|
||||
c.memBytes.Reset()
|
||||
c.diskMax.Reset()
|
||||
c.uptime.Reset()
|
||||
c.cpuUsage.Reset()
|
||||
c.memBytesUsed.Reset()
|
||||
c.netReceive.Reset()
|
||||
c.netTransmit.Reset()
|
||||
c.diskReadOps.Reset()
|
||||
c.diskWriteOps.Reset()
|
||||
c.diskReadBytes.Reset()
|
||||
c.diskWriteBytes.Reset()
|
||||
c.diskFailedReadOps.Reset()
|
||||
c.diskFailedWriteOps.Reset()
|
||||
c.diskReadTimeNs.Reset()
|
||||
c.diskWriteTimeNs.Reset()
|
||||
|
||||
for _, node := range cluster.NodeStatuses {
|
||||
qemus, err := c.apiClient.GetNodeQemuList(node.Name)
|
||||
if err != nil {
|
||||
|
||||
Reference in New Issue
Block a user