This repository has been archived by the owner on May 4, 2022. It is now read-only.

Commit: Remove sa_ prefix (#74)
We should not need to prefix metrics with the deprecated framework abbreviation.
paramite authored Feb 26, 2020
1 parent 043bbd9 commit 9990fd6
Showing 7 changed files with 31 additions and 31 deletions.
22 changes: 11 additions & 11 deletions configs/prometheus.rules.yml
@@ -40,8 +40,8 @@
   rules:
   # Alert for any instance that is unreachable for >5 minutes.
   - alert: Collectd_down
-    #expr: absent(sa_collectd_uptime) // this cant be predicted
-    expr: sa_collectd_last_metric_for_host_status==0
+    #expr: absent(collectd_uptime) // this cant be predicted
+    expr: collectd_last_metric_for_host_status==0
     for: 20s
     labels:
       severity: critical
@@ -54,8 +54,8 @@
   rules:
   # Alert for any instance that is unreachable for >5 minutes.
   - alert: qpid_router_down
-    #expr: absent(sa_collectd_uptime)
-    expr: sa_collectd_qpid_router_status==0
+    #expr: absent(collectd_uptime)
+    expr: collectd_qpid_router_status==0
     for: 10s
     labels:
       severity: critical
@@ -68,8 +68,8 @@
   rules:
   # Alert for any instance that is unreachable for >5 minutes.
   - alert: elasticsearch_down
-    #expr: absent(sa_collectd_uptime)
-    expr: sa_collectd_elasticsearch_status==0
+    #expr: absent(collectd_uptime)
+    expr: collectd_elasticsearch_status==0
     for: 10s
     labels:
       severity: critical
@@ -82,8 +82,8 @@
   rules:
   # Alert for any instance that is unreachable for >5 minutes.
   - alert: smartgateway_listerner_slow
-    #expr: absent(sa_collectd_uptime)
-    expr: floor(sa_collectd_last_pull_timestamp_seconds)-time()>6
+    #expr: absent(collectd_uptime)
+    expr: floor(collectd_last_pull_timestamp_seconds)-time()>6
     for: 10s
     labels:
       severity: critical
@@ -96,8 +96,8 @@
   rules:
   # Alert for any instance that is unreachable for >5 minutes.
  - alert: smartgateway_listerner_down
-    #expr: absent(sa_collectd_uptime)
-    expr: absent(sa_collectd_last_pull_timestamp_seconds)
+    #expr: absent(collectd_uptime)
+    expr: absent(collectd_last_pull_timestamp_seconds)
     for: 20s
     labels:
       severity: critical
@@ -134,7 +134,7 @@
   interval: 5s
   rules:
   - alert: High CPU usage
-    expr: (avg by (exported_instance) (irate(sa_collectd_cpu_total[5m]))) > 75
+    expr: (avg by (exported_instance) (irate(collectd_cpu_total[5m]))) > 75
     for: 20s
     labels:
       severity: critical
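All five alert expressions above now query the unprefixed metric names. As a rough, standalone illustration (not part of this commit), a check like the following could scan configs/prometheus.rules.yml for any leftover sa_ references; the gopkg.in/yaml.v2 dependency and the exact group layout are assumptions.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"gopkg.in/yaml.v2"
)

// ruleFile models just enough of the Prometheus rule file layout
// to reach each alert expression.
type ruleFile struct {
	Groups []struct {
		Name  string `yaml:"name"`
		Rules []struct {
			Alert string `yaml:"alert"`
			Expr  string `yaml:"expr"`
		} `yaml:"rules"`
	} `yaml:"groups"`
}

func main() {
	raw, err := ioutil.ReadFile("configs/prometheus.rules.yml")
	if err != nil {
		log.Fatal(err)
	}
	var rf ruleFile
	if err := yaml.Unmarshal(raw, &rf); err != nil {
		log.Fatal(err)
	}
	for _, g := range rf.Groups {
		for _, r := range g.Rules {
			// Flag any expression that still uses the deprecated prefix.
			if strings.Contains(r.Expr, "sa_collectd_") {
				fmt.Printf("group %q, alert %q still uses the sa_ prefix\n", g.Name, r.Alert)
			}
		}
	}
}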
6 changes: 3 additions & 3 deletions internal/pkg/amqp10/receiver.go
@@ -106,15 +106,15 @@ func NewAMQPHandler(source string) *AMQPHandler {
         totalCount: 0,
         totalProcessed: 0,
         totalReconnectCount: 0,
-        totalCountDesc: prometheus.NewDesc("sa_collectd_total_amqp_message_recv_count",
+        totalCountDesc: prometheus.NewDesc("collectd_total_amqp_message_recv_count",
             "Total count of amqp message received.",
             nil, plabels,
         ),
-        totalProcessedDesc: prometheus.NewDesc("sa_collectd_total_amqp_processed_message_count",
+        totalProcessedDesc: prometheus.NewDesc("collectd_total_amqp_processed_message_count",
             "Total count of amqp message processed.",
             nil, plabels,
         ),
-        totalReconnectCountDesc: prometheus.NewDesc("sa_collectd_total_amqp_reconnect_count",
+        totalReconnectCountDesc: prometheus.NewDesc("collectd_total_amqp_reconnect_count",
             "Total count of amqp reconnection .",
             nil, plabels,
         ),
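The three AMQP counters above keep their help strings and labels; only the metric name loses the prefix. For context, here is a standalone sketch (not the code in internal/pkg/amqp10) of how descriptors like these are typically exposed through the Prometheus client's Collector interface; the source label value and the counter fields are assumptions.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// amqpStats is a stand-in for the repository's AMQPHandler, holding the same
// three descriptors plus the counters they report.
type amqpStats struct {
	totalCount, totalProcessed, totalReconnectCount int64

	totalCountDesc          *prometheus.Desc
	totalProcessedDesc      *prometheus.Desc
	totalReconnectCountDesc *prometheus.Desc
}

func (a *amqpStats) Describe(ch chan<- *prometheus.Desc) {
	ch <- a.totalCountDesc
	ch <- a.totalProcessedDesc
	ch <- a.totalReconnectCountDesc
}

func (a *amqpStats) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(a.totalCountDesc, prometheus.CounterValue, float64(a.totalCount))
	ch <- prometheus.MustNewConstMetric(a.totalProcessedDesc, prometheus.CounterValue, float64(a.totalProcessed))
	ch <- prometheus.MustNewConstMetric(a.totalReconnectCountDesc, prometheus.CounterValue, float64(a.totalReconnectCount))
}

func main() {
	plabels := prometheus.Labels{"source": "amqp"} // label value is assumed
	s := &amqpStats{
		totalCountDesc: prometheus.NewDesc("collectd_total_amqp_message_recv_count",
			"Total count of amqp message received.", nil, plabels),
		totalProcessedDesc: prometheus.NewDesc("collectd_total_amqp_processed_message_count",
			"Total count of amqp message processed.", nil, plabels),
		totalReconnectCountDesc: prometheus.NewDesc("collectd_total_amqp_reconnect_count",
			"Total count of amqp reconnection .", nil, plabels),
	}
	prometheus.MustRegister(s)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}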
10 changes: 5 additions & 5 deletions internal/pkg/api/handler.go
@@ -131,11 +131,11 @@ func NewAppStateMetricHandler(applicationHealth *cacheutil.ApplicationHealthCach
     plabels["source"] = "Metric Listener"
     return &MetricHandler{
         applicationHealth: applicationHealth,
-        lastPull: prometheus.NewDesc("sa_collectd_last_pull_timestamp_seconds",
+        lastPull: prometheus.NewDesc("collectd_last_pull_timestamp_seconds",
             "Unix timestamp of the last metrics pull in seconds.",
             nil, plabels,
         ),
-        qpidRouterState: prometheus.NewDesc("sa_collectd_qpid_router_status",
+        qpidRouterState: prometheus.NewDesc("collectd_qpid_router_status",
             "Metric listener router status ",
             nil, plabels,
         ),
@@ -149,15 +149,15 @@ func NewAppStateEventMetricHandler(applicationHealth *cacheutil.ApplicationHealt
 
     return &EventMetricHandler{
         applicationHealth: applicationHealth,
-        lastPull: prometheus.NewDesc("sa_collectd_last_pull_timestamp_seconds",
+        lastPull: prometheus.NewDesc("collectd_last_pull_timestamp_seconds",
             "Unix timestamp of the last event listener pull in seconds.",
             nil, plabels,
         ),
-        qpidRouterState: prometheus.NewDesc("sa_collectd_qpid_router_status",
+        qpidRouterState: prometheus.NewDesc("collectd_qpid_router_status",
             "Event listener router status ",
             nil, plabels,
         ),
-        elasticSearchState: prometheus.NewDesc("sa_collectd_elasticsearch_status",
+        elasticSearchState: prometheus.NewDesc("collectd_elasticsearch_status",
             "Event listener ElasticSearch status ",
             nil, plabels,
         ),
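Both handlers publish descriptors under the same names and stay distinguishable only through their constant source label (the diff shows "Metric Listener"; the event listener's value is not visible here). A small standalone sketch of that distinction, with an assumed label value for the event side:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Label value taken from the diff above.
	metricSide := prometheus.NewDesc("collectd_qpid_router_status",
		"Metric listener router status ", nil,
		prometheus.Labels{"source": "Metric Listener"})

	// Label value assumed; the diff does not show the event listener's labels.
	eventSide := prometheus.NewDesc("collectd_qpid_router_status",
		"Event listener router status ", nil,
		prometheus.Labels{"source": "Event Listener"})

	fmt.Println(metricSide.String())
	fmt.Println(eventSide.String())
}

These are the same series that the qpid_router_down, elasticsearch_down, and smartgateway listener alerts in configs/prometheus.rules.yml evaluate after the rename.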
4 changes: 2 additions & 2 deletions internal/pkg/metrics/incoming/collectd.go
@@ -162,9 +162,9 @@ func (c CollectdMetric) GetMetricDesc(index int) string {
 
 //GetMetricName ...
 func (c CollectdMetric) GetMetricName(index int) string {
-    name := "sa_collectd_" + c.Plugin + "_" + c.Type
+    name := "collectd_" + c.Plugin + "_" + c.Type
     if c.Plugin == c.Type {
-        name = "sa_collectd_" + c.Type
+        name = "collectd_" + c.Type
     }
 
     if dsname := c.DSName(index); dsname != "value" {
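The naming rule after this change can be read straight from the hunk: "collectd_" plus plugin and type, collapsed to a single segment when plugin and type match, with the data-source name appended unless it is "value" and a _total suffix for counter-like sources. The last two behaviours are inferred from the surrounding code and from tests/internal_pkg/incoming_test.go below, not from this hunk. A standalone sketch:

package main

import "fmt"

// metricName reproduces the renamed naming rule for illustration only;
// the real code is a method on CollectdMetric in internal/pkg/metrics/incoming.
func metricName(plugin, typ, dsname, dstype string) string {
	name := "collectd_" + plugin + "_" + typ
	if plugin == typ {
		name = "collectd_" + typ
	}
	if dsname != "value" {
		name += "_" + dsname
	}
	if dstype == "counter" || dstype == "derive" {
		name += "_total" // counter-like sources get the conventional suffix
	}
	return name
}

func main() {
	fmt.Println(metricName("pluginname", "collectd", "value1", "gauge"))    // collectd_pluginname_collectd_value1
	fmt.Println(metricName("pluginname", "pluginname", "value1", "gauge"))  // collectd_pluginname_value1
	fmt.Println(metricName("pluginname", "pluginname", "value1", "derive")) // collectd_pluginname_value1_total
}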
4 changes: 2 additions & 2 deletions internal/pkg/tsdb/prometheus.go
@@ -23,7 +23,7 @@ var (
 //NewHeartBeatMetricByHost ...
 func NewHeartBeatMetricByHost(instance string, value float64) (prometheus.Metric, error) {
     valueType := prometheus.GaugeValue
-    metricName := "sa_collectd_last_metric_for_host_status"
+    metricName := "collectd_last_metric_for_host_status"
     help := "Status of metrics for host currently active."
 
     plabels := prometheus.Labels{}
@@ -35,7 +35,7 @@ func NewHeartBeatMetricByHost(instance string, value float64) (prometheus.Metric
 //AddMetricsByHost ...
 func AddMetricsByHost(instance string, value float64) (prometheus.Metric, error) {
     valueType := prometheus.GaugeValue
-    metricName := "sa_collectd_metric_per_host"
+    metricName := "collectd_metric_per_host"
     help := "No of metrics for host currently read."
 
     plabels := prometheus.Labels{}
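Both helpers return a ready-made prometheus.Metric carrying the renamed gauge, and usage mirrors the tests below. A standalone sketch; the repository import path is an assumption:

package main

import (
	"fmt"
	"log"

	dto "github.com/prometheus/client_model/go"

	// Import path assumed; adjust to this repository's actual module path.
	"github.com/infrawatch/smart-gateway/internal/pkg/tsdb"
)

func main() {
	// Heart-beat gauge for a host, as exercised in tsdb_test.go below.
	hb, err := tsdb.NewHeartBeatMetricByHost("compute-0", 1)
	if err != nil {
		log.Fatal(err)
	}
	var m1 dto.Metric
	if err := hb.Write(&m1); err != nil {
		log.Fatal(err)
	}
	fmt.Println("collectd_last_metric_for_host_status =", m1.GetGauge().GetValue())

	// Per-host metric count gauge.
	count, err := tsdb.AddMetricsByHost("compute-0", 42)
	if err != nil {
		log.Fatal(err)
	}
	var m2 dto.Metric
	if err := count.Write(&m2); err != nil {
		log.Fatal(err)
	}
	fmt.Println("collectd_metric_per_host =", m2.GetGauge().GetValue())
}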
8 changes: 4 additions & 4 deletions tests/internal_pkg/incoming_test.go
@@ -122,14 +122,14 @@ func TestCollectdIncoming(t *testing.T) {
         metricDesc := "Service Assurance exporter: 'pluginname' Type: 'collectd' Dstype: 'gauge' Dsname: 'value1'"
         assert.Equal(t, metricDesc, sample.GetMetricDesc(0))
         // test GetMetricName behaviour
-        metricName := "sa_collectd_pluginname_collectd_value1"
+        metricName := "collectd_pluginname_collectd_value1"
         assert.Equal(t, metricName, sample.GetMetricName(0))
         sample.Type = sample.Plugin
-        metricName = "sa_collectd_pluginname_value1"
+        metricName = "collectd_pluginname_value1"
         assert.Equal(t, metricName, sample.GetMetricName(0))
         sample.Dstypes = []string{"counter", "derive"}
-        metricName1 := "sa_collectd_pluginname_value1_total"
-        metricName2 := "sa_collectd_pluginname_value2_total"
+        metricName1 := "collectd_pluginname_value1_total"
+        metricName2 := "collectd_pluginname_value2_total"
         assert.Equal(t, metricName1, sample.GetMetricName(0))
         assert.Equal(t, metricName2, sample.GetMetricName(1))
     })
8 changes: 4 additions & 4 deletions tests/internal_pkg/tsdb_test.go
@@ -35,19 +35,19 @@ func TestTimestamp(t *testing.T) {
 func TestCollectdMetric(t *testing.T) {
     t.Run("Test prometeus metric values", func(t *testing.T) {
         sample, collectdMetric, metric := GenerateCollectdMetric("hostname", "pluginname", true, 0)
-        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"sa_collectd_pluginname_collectd_value1\""))
+        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"collectd_pluginname_collectd_value1\""))
         assert.Equal(t, sample.Values[0], metric.GetGauge().GetValue())
         assert.Equal(t, 0.0, metric.GetCounter().GetValue())
 
         sample, collectdMetric, metric = GenerateCollectdMetric("hostname", "pluginname", true, 1)
-        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"sa_collectd_pluginname_collectd_value2_total\""))
+        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"collectd_pluginname_collectd_value2_total\""))
         assert.Equal(t, sample.Values[1], metric.GetCounter().GetValue())
         assert.Equal(t, 0.0, metric.GetGauge().GetValue())
     })
 
     t.Run("Test heart beat metric", func(t *testing.T) {
         collectdMetric, _ := tsdb.NewHeartBeatMetricByHost("test_heartbeat", 66.6)
-        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"sa_collectd_last_metric_for_host_status\""))
+        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"collectd_last_metric_for_host_status\""))
         metric := dto.Metric{}
         collectdMetric.Write(&metric)
         assert.Equal(t, 66.6, metric.GetGauge().GetValue())
@@ -56,7 +56,7 @@ func TestCollectdMetric(t *testing.T) {
 
     t.Run("Test metric by host", func(t *testing.T) {
         collectdMetric, _ := tsdb.AddMetricsByHost("test_host", 666.0)
-        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"sa_collectd_metric_per_host\""))
+        assert.True(t, strings.HasPrefix(collectdMetric.Desc().String(), "Desc{fqName: \"collectd_metric_per_host\""))
         metric := dto.Metric{}
         collectdMetric.Write(&metric)
         assert.Equal(t, 666.0, metric.GetGauge().GetValue())
