// kafka_exporter.go (forked from danielqsj/kafka_exporter)
package main
import (
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
"github.com/golang/glog"
"github.com/krallistic/kazoo-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
plog "github.com/prometheus/common/promlog"
plogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/version"
"github.com/rcrowley/go-metrics"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
namespace = "kafka"
clientID = "kafka_exporter"
)
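// glog verbosity levels used with glog.V() throughout the exporter.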
const (
INFO = 0
DEBUG = 1
TRACE = 2
)
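// Prometheus metric descriptors; they are initialized in setup() once the configured labels are known.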
var (
clusterBrokers *prometheus.Desc
topicPartitions *prometheus.Desc
topicCurrentOffset *prometheus.Desc
topicOldestOffset *prometheus.Desc
topicPartitionLeader *prometheus.Desc
topicPartitionReplicas *prometheus.Desc
topicPartitionInSyncReplicas *prometheus.Desc
topicPartitionUsesPreferredReplica *prometheus.Desc
topicUnderReplicatedPartition *prometheus.Desc
consumergroupCurrentOffset *prometheus.Desc
consumergroupCurrentOffsetSum *prometheus.Desc
consumergroupLag *prometheus.Desc
consumergroupLagSum *prometheus.Desc
consumergroupLagZookeeper *prometheus.Desc
// TODO(adrian.arumugam): Hack to patch kafka_exporter to make it reliable ASAP while
// a better upstream patch is figured out.
//consumergroupMembers *prometheus.Desc
)
// Exporter collects Kafka stats from the given server and exports them using
// the prometheus metrics package.
type Exporter struct {
client sarama.Client
topicFilter *regexp.Regexp
groupFilter *regexp.Regexp
mu sync.Mutex
useZooKeeperLag bool
zookeeperClient *kazoo.Kazoo
nextMetadataRefresh time.Time
metadataRefreshInterval time.Duration
offsetShowAll bool
topicWorkers int
allowConcurrent bool
sgMutex sync.Mutex
sgWaitCh chan struct{}
sgChans []chan<- prometheus.Metric
consumerGroupFetchAll bool
}
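// kafkaOpts holds the command line options that configure the Kafka client,
// SASL and TLS authentication, ZooKeeper lag collection and exporter behaviour.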
type kafkaOpts struct {
uri []string
useSASL bool
useSASLHandshake bool
saslUsername string
saslPassword string
saslMechanism string
saslDisablePAFXFast bool
useTLS bool
tlsServerName string
tlsCAFile string
tlsCertFile string
tlsKeyFile string
serverUseTLS bool
serverMutualAuthEnabled bool
serverTlsCAFile string
serverTlsCertFile string
serverTlsKeyFile string
tlsInsecureSkipTLSVerify bool
kafkaVersion string
useZooKeeperLag bool
uriZookeeper []string
labels string
metadataRefreshInterval string
serviceName string
kerberosConfigPath string
realm string
keyTabPath string
kerberosAuthType string
offsetShowAll bool
topicWorkers int
allowConcurrent bool
verbosityLogLevel int
}
// CanReadCertAndKey returns true if both the certificate and key files exist and are
// readable, and false if neither is readable. If only one of the pair is readable, it returns an error.
func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
certReadable := canReadFile(certPath)
keyReadable := canReadFile(keyPath)
if !certReadable && !keyReadable {
return false, nil
}
if !certReadable {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
}
if !keyReadable {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
}
return true, nil
}
// canReadFile returns true if the file at path exists and is readable,
// otherwise it returns false.
func canReadFile(path string) bool {
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
return true
}
// NewExporter returns an initialized Exporter.
func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Exporter, error) {
var zookeeperClient *kazoo.Kazoo
config := sarama.NewConfig()
config.ClientID = clientID
kafkaVersion, err := sarama.ParseKafkaVersion(opts.kafkaVersion)
if err != nil {
return nil, err
}
config.Version = kafkaVersion
if opts.useSASL {
// Convert to lowercase so that SHA512 and SHA256 are still accepted
opts.saslMechanism = strings.ToLower(opts.saslMechanism)
switch opts.saslMechanism {
case "scram-sha512":
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
case "scram-sha256":
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
case "gssapi":
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeGSSAPI)
config.Net.SASL.GSSAPI.ServiceName = opts.serviceName
config.Net.SASL.GSSAPI.KerberosConfigPath = opts.kerberosConfigPath
config.Net.SASL.GSSAPI.Realm = opts.realm
config.Net.SASL.GSSAPI.Username = opts.saslUsername
if opts.kerberosAuthType == "keytabAuth" {
config.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH
config.Net.SASL.GSSAPI.KeyTabPath = opts.keyTabPath
} else {
config.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH
config.Net.SASL.GSSAPI.Password = opts.saslPassword
}
if opts.saslDisablePAFXFast {
config.Net.SASL.GSSAPI.DisablePAFXFAST = true
}
case "plain":
default:
return nil, fmt.Errorf(
`invalid sasl mechanism "%s": can only be "scram-sha256", "scram-sha512", "gssapi" or "plain"`,
opts.saslMechanism,
)
}
config.Net.SASL.Enable = true
config.Net.SASL.Handshake = opts.useSASLHandshake
if opts.saslUsername != "" {
config.Net.SASL.User = opts.saslUsername
}
if opts.saslPassword != "" {
config.Net.SASL.Password = opts.saslPassword
}
}
if opts.useTLS {
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{
ServerName: opts.tlsServerName,
RootCAs: x509.NewCertPool(),
InsecureSkipVerify: opts.tlsInsecureSkipTLSVerify,
}
if opts.tlsCAFile != "" {
if ca, err := ioutil.ReadFile(opts.tlsCAFile); err == nil {
config.Net.TLS.Config.RootCAs.AppendCertsFromPEM(ca)
} else {
return nil, err
}
}
canReadCertAndKey, err := CanReadCertAndKey(opts.tlsCertFile, opts.tlsKeyFile)
if err != nil {
return nil, errors.Wrap(err, "error reading cert and key")
}
if canReadCertAndKey {
cert, err := tls.LoadX509KeyPair(opts.tlsCertFile, opts.tlsKeyFile)
if err == nil {
config.Net.TLS.Config.Certificates = []tls.Certificate{cert}
} else {
return nil, err
}
}
}
if opts.useZooKeeperLag {
glog.V(DEBUG).Infoln("Using zookeeper lag, so connecting to zookeeper")
zookeeperClient, err = kazoo.NewKazoo(opts.uriZookeeper, nil)
if err != nil {
return nil, errors.Wrap(err, "error connecting to zookeeper")
}
}
interval, err := time.ParseDuration(opts.metadataRefreshInterval)
if err != nil {
return nil, errors.Wrap(err, "Cannot parse metadata refresh interval")
}
config.Metadata.RefreshFrequency = interval
client, err := sarama.NewClient(opts.uri, config)
if err != nil {
return nil, errors.Wrap(err, "error initializing Kafka client")
}
glog.V(TRACE).Infoln("Done initializing clients")
// Init our exporter.
return &Exporter{
client: client,
topicFilter: regexp.MustCompile(topicFilter),
groupFilter: regexp.MustCompile(groupFilter),
useZooKeeperLag: opts.useZooKeeperLag,
zookeeperClient: zookeeperClient,
nextMetadataRefresh: time.Now(),
metadataRefreshInterval: interval,
offsetShowAll: opts.offsetShowAll,
topicWorkers: opts.topicWorkers,
allowConcurrent: opts.allowConcurrent,
sgMutex: sync.Mutex{},
sgWaitCh: nil,
sgChans: []chan<- prometheus.Metric{},
consumerGroupFetchAll: config.Version.IsAtLeast(sarama.V2_0_0_0),
}, nil
}
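// fetchOffsetVersion maps the configured Kafka broker version to the offset fetch
// request API version the exporter should use.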
func (e *Exporter) fetchOffsetVersion() int16 {
version := e.client.Config().Version
if version.IsAtLeast(sarama.V2_0_0_0) {
return 4
} else if version.IsAtLeast(sarama.V0_10_2_0) {
return 2
} else if version.IsAtLeast(sarama.V0_8_2_2) {
return 1
}
return 0
}
// Describe describes all the metrics ever exported by the Kafka exporter. It
// implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
ch <- clusterBrokers
ch <- topicCurrentOffset
ch <- topicOldestOffset
ch <- topicPartitions
ch <- topicPartitionLeader
ch <- topicPartitionReplicas
ch <- topicPartitionInSyncReplicas
ch <- topicPartitionUsesPreferredReplica
ch <- topicUnderReplicatedPartition
ch <- consumergroupCurrentOffset
ch <- consumergroupCurrentOffsetSum
ch <- consumergroupLag
ch <- consumergroupLagZookeeper
ch <- consumergroupLagSum
}
// Collect fetches the stats from configured Kafka location and delivers them
// as Prometheus metrics. It implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
if e.allowConcurrent {
e.collect(ch)
return
}
// Lock to avoid races while appending to sgChans
e.sgMutex.Lock()
e.sgChans = append(e.sgChans, ch)
// Safe to compare length since we own the Lock
if len(e.sgChans) == 1 {
e.sgWaitCh = make(chan struct{})
go e.collectChans(e.sgWaitCh)
} else {
glog.V(TRACE).Info("concurrent calls detected, waiting for first to finish")
}
// Copy to a local variable so that another Collect cannot overwrite the channel we are about to wait on
waiter := e.sgWaitCh
e.sgMutex.Unlock()
// The lock is released; we are guaranteed that our channel is part of the sgChans slice
<-waiter
// collectChans finished
}
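// collectChans performs a single collection and fans the gathered metrics out to every
// channel registered in sgChans, then closes quit so that waiting Collect calls can return.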
func (e *Exporter) collectChans(quit chan struct{}) {
original := make(chan prometheus.Metric)
container := make([]prometheus.Metric, 0, 100)
go func() {
for metric := range original {
container = append(container, metric)
}
}()
e.collect(original)
close(original)
// Lock to avoid modification on the channel slice
e.sgMutex.Lock()
for _, ch := range e.sgChans {
for _, metric := range container {
ch <- metric
}
}
// Reset the slice
e.sgChans = e.sgChans[:0]
// Notify remaining waiting Collect they can return
close(quit)
// Release the lock so Collect can append to the slice again
e.sgMutex.Unlock()
}
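// collect gathers broker, topic and consumer group metrics from the Kafka cluster
// (and optionally ZooKeeper) and writes them to ch.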
func (e *Exporter) collect(ch chan<- prometheus.Metric) {
var wg = sync.WaitGroup{}
ch <- prometheus.MustNewConstMetric(
clusterBrokers, prometheus.GaugeValue, float64(len(e.client.Brokers())),
)
offset := make(map[string]map[int32]int64)
now := time.Now()
if now.After(e.nextMetadataRefresh) {
glog.V(DEBUG).Info("Refreshing client metadata")
if err := e.client.RefreshMetadata(); err != nil {
glog.Errorf("Cannot refresh topics, using cached data: %v", err)
}
e.nextMetadataRefresh = now.Add(e.metadataRefreshInterval)
}
topics, err := e.client.Topics()
if err != nil {
glog.Errorf("Cannot get topics: %v", err)
return
}
topicChannel := make(chan string)
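// getTopicMetrics emits partition count, offset, leader, replica and in-sync replica
// metrics for a single topic.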
getTopicMetrics := func(topic string) {
defer wg.Done()
if !e.topicFilter.MatchString(topic) {
return
}
partitions, err := e.client.Partitions(topic)
if err != nil {
glog.Errorf("Cannot get partitions of topic %s: %v", topic, err)
return
}
ch <- prometheus.MustNewConstMetric(
topicPartitions, prometheus.GaugeValue, float64(len(partitions)), topic,
)
e.mu.Lock()
offset[topic] = make(map[int32]int64, len(partitions))
e.mu.Unlock()
for _, partition := range partitions {
broker, err := e.client.Leader(topic, partition)
if err != nil {
glog.Errorf("Cannot get leader of topic %s partition %d: %v", topic, partition, err)
} else {
ch <- prometheus.MustNewConstMetric(
topicPartitionLeader, prometheus.GaugeValue, float64(broker.ID()), topic, strconv.FormatInt(int64(partition), 10),
)
}
currentOffset, err := e.client.GetOffset(topic, partition, sarama.OffsetNewest)
if err != nil {
glog.Errorf("Cannot get current offset of topic %s partition %d: %v", topic, partition, err)
} else {
e.mu.Lock()
offset[topic][partition] = currentOffset
e.mu.Unlock()
ch <- prometheus.MustNewConstMetric(
topicCurrentOffset, prometheus.GaugeValue, float64(currentOffset), topic, strconv.FormatInt(int64(partition), 10),
)
}
oldestOffset, err := e.client.GetOffset(topic, partition, sarama.OffsetOldest)
if err != nil {
glog.Errorf("Cannot get oldest offset of topic %s partition %d: %v", topic, partition, err)
} else {
ch <- prometheus.MustNewConstMetric(
topicOldestOffset, prometheus.GaugeValue, float64(oldestOffset), topic, strconv.FormatInt(int64(partition), 10),
)
}
replicas, err := e.client.Replicas(topic, partition)
if err != nil {
glog.Errorf("Cannot get replicas of topic %s partition %d: %v", topic, partition, err)
} else {
ch <- prometheus.MustNewConstMetric(
topicPartitionReplicas, prometheus.GaugeValue, float64(len(replicas)), topic, strconv.FormatInt(int64(partition), 10),
)
}
inSyncReplicas, err := e.client.InSyncReplicas(topic, partition)
if err != nil {
glog.Errorf("Cannot get in-sync replicas of topic %s partition %d: %v", topic, partition, err)
} else {
ch <- prometheus.MustNewConstMetric(
topicPartitionInSyncReplicas, prometheus.GaugeValue, float64(len(inSyncReplicas)), topic, strconv.FormatInt(int64(partition), 10),
)
}
if broker != nil && replicas != nil && len(replicas) > 0 && broker.ID() == replicas[0] {
ch <- prometheus.MustNewConstMetric(
topicPartitionUsesPreferredReplica, prometheus.GaugeValue, float64(1), topic, strconv.FormatInt(int64(partition), 10),
)
} else {
ch <- prometheus.MustNewConstMetric(
topicPartitionUsesPreferredReplica, prometheus.GaugeValue, float64(0), topic, strconv.FormatInt(int64(partition), 10),
)
}
if replicas != nil && inSyncReplicas != nil && len(inSyncReplicas) < len(replicas) {
ch <- prometheus.MustNewConstMetric(
topicUnderReplicatedPartition, prometheus.GaugeValue, float64(1), topic, strconv.FormatInt(int64(partition), 10),
)
} else {
ch <- prometheus.MustNewConstMetric(
topicUnderReplicatedPartition, prometheus.GaugeValue, float64(0), topic, strconv.FormatInt(int64(partition), 10),
)
}
if e.useZooKeeperLag {
ConsumerGroups, err := e.zookeeperClient.Consumergroups()
if err != nil {
glog.Errorf("Cannot get consumer group %v", err)
}
for _, group := range ConsumerGroups {
offset, _ := group.FetchOffset(topic, partition)
if offset > 0 {
consumerGroupLag := currentOffset - offset
ch <- prometheus.MustNewConstMetric(
consumergroupLagZookeeper, prometheus.GaugeValue, float64(consumerGroupLag), group.Name, topic, strconv.FormatInt(int64(partition), 10),
)
}
}
}
}
}
loopTopics := func(id int) {
for topic := range topicChannel {
getTopicMetrics(topic)
}
}
minx := func(x int, y int) int {
if x < y {
return x
}
return y
}
// Cap the number of topic workers at topicWorkers, but always start at least one worker
// so that the sends to topicChannel below cannot block forever when fewer than two topics exist.
N := minx(len(topics)/2, e.topicWorkers)
if N < 1 {
N = 1
}
for w := 1; w <= N; w++ {
go loopTopics(w)
}
for _, topic := range topics {
if e.topicFilter.MatchString(topic) {
wg.Add(1)
topicChannel <- topic
}
}
close(topicChannel)
wg.Wait()
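// getConsumerGroupMetrics connects to a single broker, lists the consumer groups it
// coordinates and emits per-partition offset and lag metrics for each group.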
getConsumerGroupMetrics := func(broker *sarama.Broker) {
defer wg.Done()
if err := broker.Open(e.client.Config()); err != nil && err != sarama.ErrAlreadyConnected {
glog.Errorf("Cannot connect to broker %d: %v", broker.ID(), err)
return
}
defer broker.Close()
groups, err := broker.ListGroups(&sarama.ListGroupsRequest{})
if err != nil {
glog.Errorf("Cannot get consumer group: %v", err)
return
}
groupIds := make([]string, 0)
for groupId := range groups.Groups {
if e.groupFilter.MatchString(groupId) {
groupIds = append(groupIds, groupId)
}
}
describeGroups, err := broker.DescribeGroups(&sarama.DescribeGroupsRequest{Groups: groupIds})
if err != nil {
glog.Errorf("Cannot get describe groups: %v", err)
return
}
for _, group := range describeGroups.Groups {
offsetFetchRequest := sarama.OffsetFetchRequest{ConsumerGroup: group.GroupId, Version: 1}
if e.offsetShowAll {
for topic, partitions := range offset {
for partition := range partitions {
offsetFetchRequest.AddPartition(topic, partition)
}
}
} else {
for _, member := range group.Members {
assignment, err := member.GetMemberAssignment()
if err != nil {
glog.Errorf("Cannot get member assignment of group member %v: %v", member, err)
return
}
for topic, partitions := range assignment.Topics {
for _, partition := range partitions {
offsetFetchRequest.AddPartition(topic, partition)
}
}
}
}
// TODO(adrian.arumugam): Hack to patch kafka_exporter to make it reliable ASAP while
// a better upstream patch is figured out.
//ch <- prometheus.MustNewConstMetric(
// consumergroupMembers, prometheus.GaugeValue, float64(len(group.Members)), group.GroupId,
//)
offsetFetchResponse, err := broker.FetchOffset(&offsetFetchRequest)
if err != nil {
glog.Errorf("Cannot get offset of group %s: %v", group.GroupId, err)
continue
}
for topic, partitions := range offsetFetchResponse.Blocks {
// If the topic is not consumed by that consumer group, skip it
topicConsumed := false
for _, offsetFetchResponseBlock := range partitions {
// Kafka will return -1 if there is no offset associated with a topic-partition under that consumer group
if offsetFetchResponseBlock.Offset != -1 {
topicConsumed = true
break
}
}
if !topicConsumed {
continue
}
var currentOffsetSum int64
var lagSum int64
for partition, offsetFetchResponseBlock := range partitions {
err := offsetFetchResponseBlock.Err
if err != sarama.ErrNoError {
glog.Errorf("Error for partition %d: %v", partition, err.Error())
continue
}
currentOffset := offsetFetchResponseBlock.Offset
currentOffsetSum += currentOffset
ch <- prometheus.MustNewConstMetric(
consumergroupCurrentOffset, prometheus.GaugeValue, float64(currentOffset), group.GroupId, topic, strconv.FormatInt(int64(partition), 10),
)
e.mu.Lock()
if offset, ok := offset[topic][partition]; ok {
// If the topic is consumed by that consumer group, but there is no offset associated
// with the partition, force lag to -1 so it is possible to alert on it
var lag int64
if offsetFetchResponseBlock.Offset == -1 {
lag = -1
} else {
lag = offset - offsetFetchResponseBlock.Offset
lagSum += lag
}
ch <- prometheus.MustNewConstMetric(
consumergroupLag, prometheus.GaugeValue, float64(lag), group.GroupId, topic, strconv.FormatInt(int64(partition), 10),
)
} else {
glog.Errorf("No offset of topic %s partition %d, cannot get consumer group lag", topic, partition)
}
e.mu.Unlock()
}
ch <- prometheus.MustNewConstMetric(
consumergroupCurrentOffsetSum, prometheus.GaugeValue, float64(currentOffsetSum), group.GroupId, topic,
)
ch <- prometheus.MustNewConstMetric(
consumergroupLagSum, prometheus.GaugeValue, float64(lagSum), group.GroupId, topic,
)
}
}
}
glog.V(DEBUG).Info("Fetching consumer group metrics")
if len(e.client.Brokers()) > 0 {
for _, broker := range e.client.Brokers() {
wg.Add(1)
go getConsumerGroupMetrics(broker)
}
wg.Wait()
} else {
glog.Errorln("No valid broker, cannot get consumer group metrics")
}
}
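// init disables sarama's internal go-metrics collection and registers the exporter's
// build info collector.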
func init() {
metrics.UseNilMetrics = true
prometheus.MustRegister(version.NewCollector("kafka_exporter"))
}
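// The toFlag* helpers register each option with both kingpin and the standard flag
// package so that glog's flag-based setup keeps working alongside kingpin parsing.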
func toFlag(name string, help string) *kingpin.FlagClause {
flag.CommandLine.String(name, "", help) // hack around flag.Parse and glog.init flags
return kingpin.Flag(name, help)
}
// hack around flag.Parse and glog.init flags
func toFlagString(name string, help string, value string) *string {
flag.CommandLine.String(name, value, help) // hack around flag.Parse and glog.init flags
return kingpin.Flag(name, help).Default(value).String()
}
func toFlagBool(name string, help string, value bool, valueString string) *bool {
flag.CommandLine.Bool(name, value, help) // hack around flag.Parse and glog.init flags
return kingpin.Flag(name, help).Default(valueString).Bool()
}
func toFlagStringsVar(name string, help string, value string, target *[]string) {
flag.CommandLine.String(name, value, help) // hack around flag.Parse and glog.init flags
kingpin.Flag(name, help).Default(value).StringsVar(target)
}
func toFlagStringVar(name string, help string, value string, target *string) {
flag.CommandLine.String(name, value, help) // hack around flag.Parse and glog.init flags
kingpin.Flag(name, help).Default(value).StringVar(target)
}
func toFlagBoolVar(name string, help string, value bool, valueString string, target *bool) {
flag.CommandLine.Bool(name, value, help) // hack around flag.Parse and glog.init flags
kingpin.Flag(name, help).Default(valueString).BoolVar(target)
}
func toFlagIntVar(name string, help string, value int, valueString string, target *int) {
flag.CommandLine.Int(name, value, help) // hack around flag.Parse and glog.init flags
kingpin.Flag(name, help).Default(valueString).IntVar(target)
}
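// main defines and parses the command line flags and then delegates to setup.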
func main() {
var (
listenAddress = toFlagString("web.listen-address", "Address to listen on for web interface and telemetry.", ":9308")
metricsPath = toFlagString("web.telemetry-path", "Path under which to expose metrics.", "/metrics")
topicFilter = toFlagString("topic.filter", "Regex that determines which topics to collect.", ".*")
groupFilter = toFlagString("group.filter", "Regex that determines which consumer groups to collect.", ".*")
logSarama = toFlagBool("log.enable-sarama", "Turn on Sarama logging.", false, "false")
opts = kafkaOpts{}
)
toFlagStringsVar("kafka.server", "Address (host:port) of Kafka server.", "kafka:9092", &opts.uri)
toFlagBoolVar("sasl.enabled", "Connect using SASL/PLAIN.", false, "false", &opts.useSASL)
toFlagBoolVar("sasl.handshake", "Only set this to false if using a non-Kafka SASL proxy.", true, "true", &opts.useSASLHandshake)
toFlagStringVar("sasl.username", "SASL user name.", "", &opts.saslUsername)
toFlagStringVar("sasl.password", "SASL user password.", "", &opts.saslPassword)
toFlagStringVar("sasl.mechanism", "SASL mechanism to use: plain, scram-sha256, scram-sha512 or gssapi.", "", &opts.saslMechanism)
toFlagStringVar("sasl.service-name", "Service name when using kerberos Auth", "", &opts.serviceName)
toFlagStringVar("sasl.kerberos-config-path", "Kerberos config path", "", &opts.kerberosConfigPath)
toFlagStringVar("sasl.realm", "Kerberos realm", "", &opts.realm)
toFlagStringVar("sasl.kerberos-auth-type", "Kerberos auth type. Either 'keytabAuth' or 'userAuth'", "", &opts.kerberosAuthType)
toFlagStringVar("sasl.keytab-path", "Kerberos keytab file path", "", &opts.keyTabPath)
toFlagBoolVar("sasl.disable-PA-FX-FAST", "Configure the Kerberos client to not use PA_FX_FAST.", false, "false", &opts.saslDisablePAFXFast)
toFlagBoolVar("tls.enabled", "Connect to Kafka using TLS.", false, "false", &opts.useTLS)
toFlagStringVar("tls.server-name", "Used to verify the hostname on the returned certificates unless tls.insecure-skip-tls-verify is given. The Kafka server's name should be given.", "", &opts.tlsServerName)
toFlagStringVar("tls.ca-file", "The optional certificate authority file for Kafka TLS client authentication.", "", &opts.tlsCAFile)
toFlagStringVar("tls.cert-file", "The optional certificate file for Kafka client authentication.", "", &opts.tlsCertFile)
toFlagStringVar("tls.key-file", "The optional key file for Kafka client authentication.", "", &opts.tlsKeyFile)
toFlagBoolVar("server.tls.enabled", "Enable TLS for web server.", false, "false", &opts.serverUseTLS)
toFlagBoolVar("server.tls.mutual-auth-enabled", "Enable TLS client mutual authentication.", false, "false", &opts.serverMutualAuthEnabled)
toFlagStringVar("server.tls.ca-file", "The certificate authority file for the web server.", "", &opts.serverTlsCAFile)
toFlagStringVar("server.tls.cert-file", "The certificate file for the web server.", "", &opts.serverTlsCertFile)
toFlagStringVar("server.tls.key-file", "The key file for the web server.", "", &opts.serverTlsKeyFile)
toFlagBoolVar("tls.insecure-skip-tls-verify", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.", false, "false", &opts.tlsInsecureSkipTLSVerify)
toFlagStringVar("kafka.version", "Kafka broker version", sarama.V2_0_0_0.String(), &opts.kafkaVersion)
toFlagBoolVar("use.consumelag.zookeeper", "Also fetch consumer group lag from ZooKeeper, for groups that commit offsets there.", false, "false", &opts.useZooKeeperLag)
toFlagStringsVar("zookeeper.server", "Address (hosts) of zookeeper server.", "localhost:2181", &opts.uriZookeeper)
toFlagStringVar("kafka.labels", "Kafka cluster name", "", &opts.labels)
toFlagStringVar("refresh.metadata", "Metadata refresh interval", "30s", &opts.metadataRefreshInterval)
toFlagBoolVar("offset.show-all", "Whether to show the offset/lag for all consumer groups; otherwise, only show connected consumer groups.", true, "true", &opts.offsetShowAll)
toFlagBoolVar("concurrent.enable", "If true, all scrapes will trigger Kafka operations; otherwise, they will share results. WARN: This should be disabled on large clusters.", false, "false", &opts.allowConcurrent)
toFlagIntVar("topic.workers", "Number of topic workers", 100, "100", &opts.topicWorkers)
toFlagIntVar("verbosity", "Verbosity log level", 0, "0", &opts.verbosityLogLevel)
plConfig := plog.Config{}
plogflag.AddFlags(kingpin.CommandLine, &plConfig)
kingpin.Version(version.Print("kafka_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
labels := make(map[string]string)
// Protect against empty labels
if opts.labels != "" {
for _, label := range strings.Split(opts.labels, ",") {
splitted := strings.Split(label, "=")
if len(splitted) >= 2 {
labels[splitted[0]] = splitted[1]
}
}
}
setup(*listenAddress, *metricsPath, *topicFilter, *groupFilter, *logSarama, opts, labels)
}
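// setup configures logging, builds the Prometheus metric descriptors with the
// configured labels, creates the exporter and starts the HTTP or HTTPS server.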
func setup(
listenAddress string,
metricsPath string,
topicFilter string,
groupFilter string,
logSarama bool,
opts kafkaOpts,
labels map[string]string,
) {
if err := flag.Set("logtostderr", "true"); err != nil {
glog.Errorf("Error on setting logtostderr to true")
}
flag.Set("v", strconv.Itoa(opts.verbosityLogLevel))
flag.Parse()
defer glog.Flush()
glog.V(INFO).Infoln("Starting kafka_exporter", version.Info())
glog.V(DEBUG).Infoln("Build context", version.BuildContext())
clusterBrokers = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "brokers"),
"Number of Brokers in the Kafka Cluster.",
nil, labels,
)
topicPartitions = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partitions"),
"Number of partitions for this Topic",
[]string{"topic"}, labels,
)
topicCurrentOffset = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_current_offset"),
"Current Offset of a Broker at Topic/Partition",
[]string{"topic", "partition"}, labels,
)
topicOldestOffset = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_oldest_offset"),
"Oldest Offset of a Broker at Topic/Partition",
[]string{"topic", "partition"}, labels,
)
topicPartitionLeader = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_leader"),
"Leader Broker ID of this Topic/Partition",
[]string{"topic", "partition"}, labels,
)
topicPartitionReplicas = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_replicas"),
"Number of Replicas for this Topic/Partition",
[]string{"topic", "partition"}, labels,
)
topicPartitionInSyncReplicas = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_in_sync_replica"),
"Number of In-Sync Replicas for this Topic/Partition",
[]string{"topic", "partition"}, labels,
)
topicPartitionUsesPreferredReplica = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_leader_is_preferred"),
"1 if Topic/Partition is using the Preferred Broker",
[]string{"topic", "partition"}, labels,
)
topicUnderReplicatedPartition = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "topic", "partition_under_replicated_partition"),
"1 if Topic/Partition is under Replicated",
[]string{"topic", "partition"}, labels,
)
consumergroupCurrentOffset = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "consumergroup", "current_offset"),
"Current Offset of a ConsumerGroup at Topic/Partition",
[]string{"consumergroup", "topic", "partition"}, labels,
)
consumergroupCurrentOffsetSum = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "consumergroup", "current_offset_sum"),
"Current Offset of a ConsumerGroup at Topic for all partitions",
[]string{"consumergroup", "topic"}, labels,
)
consumergroupLag = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "consumergroup", "lag"),
"Current Approximate Lag of a ConsumerGroup at Topic/Partition",
[]string{"consumergroup", "topic", "partition"}, labels,
)
consumergroupLagZookeeper = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "consumergroupzookeeper", "lag_zookeeper"),
"Current Approximate Lag(zookeeper) of a ConsumerGroup at Topic/Partition",
[]string{"consumergroup", "topic", "partition"}, nil,
)
consumergroupLagSum = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "consumergroup", "lag_sum"),
"Current Approximate Lag of a ConsumerGroup at Topic for all partitions",
[]string{"consumergroup", "topic"}, labels,
)
// TODO(adrian.arumugam): Hack to patch kafka_exporter to make it reliable ASAP while
// a better upstream patch is figured out.
// consumergroupMembers = prometheus.NewDesc(
// prometheus.BuildFQName(namespace, "consumergroup", "members"),
// "Amount of members in a consumer group",
// []string{"consumergroup"}, labels,
// )
if logSarama {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
exporter, err := NewExporter(opts, topicFilter, groupFilter)
if err != nil {
glog.Fatalln(err)
}
defer exporter.client.Close()
prometheus.MustRegister(exporter)
http.Handle(metricsPath, promhttp.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Kafka Exporter</title></head>
<body>
<h1>Kafka Exporter</h1>
<p><a href='` + metricsPath + `'>Metrics</a></p>
</body>
</html>`))
})
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
// need more specific sarama check
w.Write([]byte("ok"))
})
if opts.serverUseTLS {
glog.V(INFO).Infoln("Listening on HTTPS", listenAddress)
_, err := CanReadCertAndKey(opts.serverTlsCertFile, opts.serverTlsKeyFile)
if err != nil {
glog.Error("error reading server cert and key")
}
clientAuthType := tls.NoClientCert
if opts.serverMutualAuthEnabled {
clientAuthType = tls.RequireAndVerifyClientCert
}
certPool := x509.NewCertPool()
if opts.serverTlsCAFile != "" {
if caCert, err := ioutil.ReadFile(opts.serverTlsCAFile); err == nil {
certPool.AppendCertsFromPEM(caCert)
} else {
glog.Error("error reading server ca")
}
}
tlsConfig := &tls.Config{
ClientCAs: certPool,
ClientAuth: clientAuthType,
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
},
}
server := &http.Server{
Addr: listenAddress,
TLSConfig: tlsConfig,
}
glog.Fatal(server.ListenAndServeTLS(opts.serverTlsCertFile, opts.serverTlsKeyFile))
} else {
glog.V(INFO).Infoln("Listening on HTTP", listenAddress)
glog.Fatal(http.ListenAndServe(listenAddress, nil))
}
}