gentoo-overlay/app-metrics/burrow_exporter/files/burrow_exporter-0.0.6-skipp...


From 50db40e49750378e22e2b638754ff07ff29f5362 Mon Sep 17 00:00:00 2001
From: NovaPS6 <NovaPS6@ostmw0195464.bskyb.com>
Date: Wed, 8 Aug 2018 14:43:33 +0100
Subject: [PATCH] make all metrics skippable
Change import back to jirwin
---
.gitignore | 1 +
burrow-exporter.go | 67 +++++++++++++++++------
burrow_exporter/exporter.go | 102 +++++++++++++++++++++---------------
3 files changed, 113 insertions(+), 57 deletions(-)
diff --git a/.gitignore b/.gitignore
index 502219c..a8e0d62 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,4 @@
burrow-exporter
dist/
vendor/
+.idea/
diff --git a/burrow-exporter.go b/burrow-exporter.go
index 1e60d67..b4e871b 100644
--- a/burrow-exporter.go
+++ b/burrow-exporter.go
@@ -14,7 +14,7 @@ import (
"github.com/jirwin/burrow_exporter/burrow_exporter"
)
-var Version = "0.0.4"
+var Version = "0.0.5"
func main() {
app := cli.NewApp()
@@ -22,36 +22,61 @@ func main() {
app.Name = "burrow-exporter"
app.Flags = []cli.Flag{
cli.StringFlag{
- Name: "burrow-addr",
- Usage: "Address that burrow is listening on",
+ Name: "burrow-addr",
+ Usage: "Address that burrow is listening on",
EnvVar: "BURROW_ADDR",
},
cli.StringFlag{
- Name: "metrics-addr",
- Usage: "Address to run prometheus on",
+ Name: "metrics-addr",
+ Usage: "Address to run prometheus on",
EnvVar: "METRICS_ADDR",
},
cli.IntFlag{
- Name: "interval",
- Usage: "The interval(seconds) specifies how often to scrape burrow.",
+ Name: "interval",
+ Usage: "The interval(seconds) specifies how often to scrape burrow.",
EnvVar: "INTERVAL",
},
cli.IntFlag{
- Name: "api-version",
- Usage: "Burrow API version to leverage",
- Value: 2,
+ Name: "api-version",
+ Usage: "Burrow API version to leverage",
+ Value: 2,
EnvVar: "API_VERSION",
},
cli.BoolFlag{
- Name: "skip-partition-status",
- Usage: "Skip exporting the per-partition status",
+ Name: "skip-partition-status",
+ Usage: "Skip exporting the per-partition status",
EnvVar: "SKIP_PARTITION_STATUS",
},
cli.BoolFlag{
- Name: "skip-group-status",
- Usage: "Skip exporting the per-group status",
+ Name: "skip-group-status",
+ Usage: "Skip exporting the per-group status",
EnvVar: "SKIP_GROUP_STATUS",
},
+ cli.BoolFlag{
+ Name: "skip-partition-lag",
+ Usage: "Skip exporting the partition lag",
+ EnvVar: "SKIP_PARTITION_LAG",
+ },
+ cli.BoolFlag{
+ Name: "skip-partition-current-offset",
+ Usage: "Skip exporting the current offset per partition",
+ EnvVar: "SKIP_PARTITION_CURRENT_OFFSET",
+ },
+ cli.BoolFlag{
+ Name: "skip-partition-max-offset",
+ Usage: "Skip exporting the partition max offset",
+ EnvVar: "SKIP_PARTITION_MAX_OFFSET",
+ },
+ cli.BoolFlag{
+ Name: "skip-total-lag",
+ Usage: "Skip exporting the total lag",
+ EnvVar: "SKIP_TOTAL_LAG",
+ },
+ cli.BoolFlag{
+ Name: "skip-topic-partition-offset",
+ Usage: "Skip exporting topic partition offset",
+ EnvVar: "SKIP_TOPIC_PARTITION_OFFSET",
+ },
}
app.Action = func(c *cli.Context) error {
@@ -76,8 +101,18 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
- exporter := burrow_exporter.MakeBurrowExporter(c.String("burrow-addr"), c.Int("api-version"),
- c.String("metrics-addr"), c.Int("interval"), c.Bool("skip-partition-status"), c.Bool("skip-group-status"))
+ exporter := burrow_exporter.MakeBurrowExporter(
+ c.String("burrow-addr"),
+ c.Int("api-version"),
+ c.String("metrics-addr"),
+ c.Int("interval"),
+ c.Bool("skip-partition-status"),
+ c.Bool("skip-group-status"),
+ c.Bool("skip-partition-lag"),
+ c.Bool("skip-partition-current-offset"),
+ c.Bool("skip-partition-max-offset"),
+ c.Bool("skip-total-lag"),
+ c.Bool("skip-topic-partition-offset"))
go exporter.Start(ctx)
<-done
diff --git a/burrow_exporter/exporter.go b/burrow_exporter/exporter.go
index 7adc398..8b48b35 100644
--- a/burrow_exporter/exporter.go
+++ b/burrow_exporter/exporter.go
@@ -16,12 +16,17 @@ import (
)
type BurrowExporter struct {
- client *BurrowClient
- metricsListenAddr string
- interval int
- wg sync.WaitGroup
- skipPartitionStatus bool
- skipConsumerStatus bool
+ client *BurrowClient
+ metricsListenAddr string
+ interval int
+ wg sync.WaitGroup
+ skipPartitionStatus bool
+ skipConsumerStatus bool
+ skipPartitionLag bool
+ skipPartitionCurrentOffset bool
+ skipPartitionMaxOffset bool
+ skipTotalLag bool
+ skipTopicPartitionOffset bool
}
func (be *BurrowExporter) processGroup(cluster, group string) {
@@ -34,19 +39,23 @@ func (be *BurrowExporter) processGroup(cluster, group string) {
}
for _, partition := range status.Status.Partitions {
- KafkaConsumerPartitionLag.With(prometheus.Labels{
- "cluster": status.Status.Cluster,
- "group": status.Status.Group,
- "topic": partition.Topic,
- "partition": strconv.Itoa(int(partition.Partition)),
- }).Set(float64(partition.End.Lag))
-
- KafkaConsumerPartitionCurrentOffset.With(prometheus.Labels{
- "cluster": status.Status.Cluster,
- "group": status.Status.Group,
- "topic": partition.Topic,
- "partition": strconv.Itoa(int(partition.Partition)),
- }).Set(float64(partition.End.Offset))
+ if !be.skipPartitionLag {
+ KafkaConsumerPartitionLag.With(prometheus.Labels{
+ "cluster": status.Status.Cluster,
+ "group": status.Status.Group,
+ "topic": partition.Topic,
+ "partition": strconv.Itoa(int(partition.Partition)),
+ }).Set(float64(partition.End.Lag))
+ }
+
+ if !be.skipPartitionCurrentOffset {
+ KafkaConsumerPartitionCurrentOffset.With(prometheus.Labels{
+ "cluster": status.Status.Cluster,
+ "group": status.Status.Group,
+ "topic": partition.Topic,
+ "partition": strconv.Itoa(int(partition.Partition)),
+ }).Set(float64(partition.End.Offset))
+ }
if !be.skipPartitionStatus {
KafkaConsumerPartitionCurrentStatus.With(prometheus.Labels{
@@ -57,18 +66,22 @@ func (be *BurrowExporter) processGroup(cluster, group string) {
}).Set(float64(Status[partition.Status]))
}
- KafkaConsumerPartitionMaxOffset.With(prometheus.Labels{
- "cluster": status.Status.Cluster,
- "group": status.Status.Group,
- "topic": partition.Topic,
- "partition": strconv.Itoa(int(partition.Partition)),
- }).Set(float64(partition.End.MaxOffset))
+ if !be.skipPartitionMaxOffset {
+ KafkaConsumerPartitionMaxOffset.With(prometheus.Labels{
+ "cluster": status.Status.Cluster,
+ "group": status.Status.Group,
+ "topic": partition.Topic,
+ "partition": strconv.Itoa(int(partition.Partition)),
+ }).Set(float64(partition.End.MaxOffset))
+ }
}
- KafkaConsumerTotalLag.With(prometheus.Labels{
- "cluster": status.Status.Cluster,
- "group": status.Status.Group,
- }).Set(float64(status.Status.TotalLag))
+ if !be.skipTotalLag {
+ KafkaConsumerTotalLag.With(prometheus.Labels{
+ "cluster": status.Status.Cluster,
+ "group": status.Status.Group,
+ }).Set(float64(status.Status.TotalLag))
+ }
if !be.skipConsumerStatus {
KafkaConsumerStatus.With(prometheus.Labels{
@@ -88,12 +101,14 @@ func (be *BurrowExporter) processTopic(cluster, topic string) {
return
}
- for i, offset := range details.Offsets {
- KafkaTopicPartitionOffset.With(prometheus.Labels{
- "cluster": cluster,
- "topic": topic,
- "partition": strconv.Itoa(i),
- }).Set(float64(offset))
+ if !be.skipTopicPartitionOffset {
+ for i, offset := range details.Offsets {
+ KafkaTopicPartitionOffset.With(prometheus.Labels{
+ "cluster": cluster,
+ "topic": topic,
+ "partition": strconv.Itoa(i),
+ }).Set(float64(offset))
+ }
}
}
@@ -208,12 +223,17 @@ func (be *BurrowExporter) mainLoop(ctx context.Context) {
}
func MakeBurrowExporter(burrowUrl string, apiVersion int, metricsAddr string, interval int, skipPartitionStatus bool,
- skipConsumerStatus bool) *BurrowExporter {
+ skipConsumerStatus bool, skipPartitionLag bool, skipPartitionCurrentOffset bool, skipPartitionMaxOffset bool, skipTotalLag bool, skipTopicPartitionOffset bool) *BurrowExporter {
return &BurrowExporter{
- client: MakeBurrowClient(burrowUrl, apiVersion),
- metricsListenAddr: metricsAddr,
- interval: interval,
- skipPartitionStatus: skipPartitionStatus,
- skipConsumerStatus: skipConsumerStatus,
+ client: MakeBurrowClient(burrowUrl, apiVersion),
+ metricsListenAddr: metricsAddr,
+ interval: interval,
+ skipPartitionStatus: skipPartitionStatus,
+ skipConsumerStatus: skipConsumerStatus,
+ skipPartitionLag: skipPartitionLag,
+ skipPartitionCurrentOffset: skipPartitionCurrentOffset,
+ skipPartitionMaxOffset: skipPartitionMaxOffset,
+ skipTotalLag: skipTotalLag,
+ skipTopicPartitionOffset: skipTopicPartitionOffset,
}
}
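
For reference, a minimal sketch of how the patched eleven-argument MakeBurrowExporter constructor could be driven directly from Go, outside the cli wrapper in burrow-exporter.go. This assumes the patched github.com/jirwin/burrow_exporter package is on the import path; the Burrow address, metrics address, interval, and skip values below are illustrative assumptions, not part of the patch.

    package main

    import (
    	"context"
    	"os"
    	"os/signal"
    	"syscall"

    	"github.com/jirwin/burrow_exporter/burrow_exporter"
    )

    func main() {
    	// Mirror burrow-exporter.go: stop on SIGINT/SIGTERM.
    	done := make(chan os.Signal, 1)
    	signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)

    	ctx, cancel := context.WithCancel(context.Background())

    	// Arguments follow the patched signature: burrow-addr, api-version,
    	// metrics-addr, interval, then the skip flags in the order
    	// skip-partition-status, skip-group-status, skip-partition-lag,
    	// skip-partition-current-offset, skip-partition-max-offset,
    	// skip-total-lag, skip-topic-partition-offset.
    	exporter := burrow_exporter.MakeBurrowExporter(
    		"http://localhost:8000", // burrow-addr (illustrative)
    		2,                       // api-version
    		"0.0.0.0:8080",          // metrics-addr (illustrative)
    		30,                      // interval in seconds (illustrative)
    		false,                   // skip-partition-status
    		false,                   // skip-group-status
    		false,                   // skip-partition-lag
    		false,                   // skip-partition-current-offset
    		false,                   // skip-partition-max-offset
    		false,                   // skip-total-lag
    		true,                    // skip-topic-partition-offset
    	)

    	// Start the scrape loop, as burrow-exporter.go does after parsing flags.
    	go exporter.Start(ctx)

    	<-done
    	cancel()
    }

Omitting any of the new skip flags on the CLI leaves the corresponding metric exported, so existing deployments keep their current behaviour unless a SKIP_* environment variable or flag is set.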