forked from linkedin/Burrow
* Add SASL-SCRAM ability to the Kafka connection. This PR adds the ability to connect to Kafka via SASL-SCRAM-SHA-256 or SASL-SCRAM-SHA-512. It adds an entry to the SASL profile configuration (key=mechanism, type=string, required=no, default=(empty)) which accepts either SCRAM-SHA-256 or SCRAM-SHA-512. Partially addresses linkedin#526. (A hedged sketch of the key handling follows below.)
* Ignore ZooKeeper znode creation if the path already exists. Currently, Burrow attempts to create its znode on startup, which causes problems when authentication is required to connect to ZooKeeper. The fix is to skip creating a znode path that already exists. (Sketched below.)
* Yext-specific Dockerfile: the config file and directory used by Burrow are updated for M4 and Khan.
* Update module and import references. `go build -o build/Burrow github.com/rjh-yext/Burrow` pulls in linkedin's branch of Burrow, so references to linkedin are changed to the current fork.
* Fix Travis CI build.
* Add Prometheus metrics exporter.
* Add support for Kafka 2.5.0 and Go 1.14.
* NameToCertificate only allows associating a single certificate with a given name. Leave that field nil to let the library select the first compatible chain from Certificates.
* Update sarama with a fix for IBM/sarama#1692.
* Remove CI jobs from upstream merge.

Co-authored-by: Roger Hwang <[email protected]>
Co-authored-by: rjh-yext <[email protected]>
Co-authored-by: klDen <[email protected]>
Co-authored-by: Michael Wain <[email protected]>
Co-authored-by: Vlad Gorodetsky <[email protected]>
Co-authored-by: Vlad Gorodetsky <[email protected]>
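For context on the first bullet, here is a minimal sketch of how the new `mechanism` profile key might be read and validated. The viper key path and the helper name `validateSASLMechanism` are assumptions for illustration, not taken from this diff:

// Hedged sketch, not from this diff: validating the new "mechanism" SASL
// profile key. Viper key path and function name are assumptions.
func validateSASLMechanism(saslName string) string {
    mechanism := viper.GetString("sasl." + saslName + ".mechanism")
    switch mechanism {
    case "", "SCRAM-SHA-256", "SCRAM-SHA-512":
        return mechanism // empty keeps the pre-existing PLAIN behavior
    default:
        panic("unknown SASL mechanism " + mechanism + " for profile " + saslName)
    }
}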
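The ZooKeeper fix in the second bullet can be sketched as below, assuming the github.com/samuel/go-zookeeper/zk client: treat zk.ErrNodeExists as success instead of failing startup. The function name and wrapper are hypothetical:

// Hedged sketch of the znode fix described above: ignore "node exists"
// errors when creating Burrow's startup paths.
func ensureZNode(conn *zk.Conn, path string) error {
    _, err := conn.Create(path, []byte{}, 0, zk.WorldACL(zk.PermAll))
    if err != nil && err != zk.ErrNodeExists {
        return err
    }
    return nil
}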
1 parent 9d3a2c5 · commit 26d79f3
Showing 13 changed files with 696 additions and 185 deletions.
(Two deleted files in this commit are not shown.)
@@ -0,0 +1,35 @@
package helpers

import (
    "crypto/sha256"
    "crypto/sha512"

    "github.com/xdg/scram"
)

// SHA256 and SHA512 are the hash generators handed to the SCRAM library,
// selecting the SCRAM-SHA-256 or SCRAM-SHA-512 mechanism respectively.
var SHA256 scram.HashGeneratorFcn = sha256.New
var SHA512 scram.HashGeneratorFcn = sha512.New

// XDGSCRAMClient adapts github.com/xdg/scram to the Begin/Step/Done
// interface that sarama expects from a SCRAM client.
type XDGSCRAMClient struct {
    *scram.Client
    *scram.ClientConversation
    scram.HashGeneratorFcn
}

// Begin creates the underlying SCRAM client for the given credentials and
// starts a new authentication conversation.
func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
    x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
    if err != nil {
        return err
    }
    x.ClientConversation = x.Client.NewConversation()
    return nil
}

// Step feeds the broker's challenge into the conversation and returns the
// client's next response.
func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {
    response, err = x.ClientConversation.Step(challenge)
    return
}

// Done reports whether the SCRAM conversation has completed.
func (x *XDGSCRAMClient) Done() bool {
    return x.ClientConversation.Done()
}
@@ -0,0 +1,182 @@
package httpserver

import (
    "net/http"
    "strconv"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/linkedin/Burrow/core/protocol"

    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
    consumerTotalLagGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "burrow_kafka_consumer_lag_total",
            Help: "The sum of all partition current lag values for the group",
        },
        []string{"cluster", "consumer_group"},
    )

    consumerStatusGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "burrow_kafka_consumer_status",
            Help: "The status of the consumer group. It is calculated from the highest status for the individual partitions. Statuses are an index list from NOTFOUND, OK, WARN, ERR, STOP, STALL, REWIND",
        },
        []string{"cluster", "consumer_group"},
    )

    consumerPartitionCurrentOffset = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "burrow_kafka_consumer_current_offset",
            Help: "Latest offset that Burrow is storing for this partition",
        },
        []string{"cluster", "consumer_group", "topic", "partition"},
    )

    consumerPartitionLagGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "burrow_kafka_consumer_partition_lag",
            Help: "Number of messages the consumer group is behind by for a partition as reported by Burrow",
        },
        []string{"cluster", "consumer_group", "topic", "partition"},
    )

    topicPartitionOffsetGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "burrow_kafka_topic_partition_offset",
            Help: "Latest offset that Burrow is storing for this topic partition",
        },
        []string{"cluster", "topic", "partition"},
    )
)

// handlePrometheusMetrics refreshes all gauges from Burrow's storage and
// evaluator modules on each scrape, then delegates to the standard promhttp
// handler to render the exposition format.
func (hc *Coordinator) handlePrometheusMetrics() http.HandlerFunc {
    promHandler := promhttp.Handler()

    return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
        for _, cluster := range listClusters(hc.App) {
            for _, consumer := range listConsumers(hc.App, cluster) {
                consumerStatus := getFullConsumerStatus(hc.App, cluster, consumer)

                // Skip groups that are unknown or only partially evaluated.
                if consumerStatus == nil ||
                    consumerStatus.Status == protocol.StatusNotFound ||
                    consumerStatus.Complete < 1.0 {
                    continue
                }

                labels := map[string]string{
                    "cluster":        cluster,
                    "consumer_group": consumer,
                }

                consumerTotalLagGauge.With(labels).Set(float64(consumerStatus.TotalLag))
                consumerStatusGauge.With(labels).Set(float64(consumerStatus.Status))

                for _, partition := range consumerStatus.Partitions {
                    if partition.Complete < 1.0 {
                        continue
                    }

                    labels := map[string]string{
                        "cluster":        cluster,
                        "consumer_group": consumer,
                        "topic":          partition.Topic,
                        "partition":      strconv.FormatInt(int64(partition.Partition), 10),
                    }

                    consumerPartitionCurrentOffset.With(labels).Set(float64(partition.End.Offset))
                    consumerPartitionLagGauge.With(labels).Set(float64(partition.CurrentLag))
                }
            }

            // Topics
            for _, topic := range listTopics(hc.App, cluster) {
                for partitionNumber, offset := range getTopicDetail(hc.App, cluster, topic) {
                    topicPartitionOffsetGauge.With(map[string]string{
                        "cluster":   cluster,
                        "topic":     topic,
                        "partition": strconv.FormatInt(int64(partitionNumber), 10),
                    }).Set(float64(offset))
                }
            }
        }

        promHandler.ServeHTTP(resp, req)
    })
}

// listClusters fetches the names of all clusters known to the storage module.
func listClusters(app *protocol.ApplicationContext) []string {
    request := &protocol.StorageRequest{
        RequestType: protocol.StorageFetchClusters,
        Reply:       make(chan interface{}),
    }
    app.StorageChannel <- request
    response := <-request.Reply
    if response == nil {
        return []string{}
    }

    return response.([]string)
}

// listConsumers fetches the consumer groups known for a single cluster.
func listConsumers(app *protocol.ApplicationContext, cluster string) []string {
    request := &protocol.StorageRequest{
        RequestType: protocol.StorageFetchConsumers,
        Cluster:     cluster,
        Reply:       make(chan interface{}),
    }
    app.StorageChannel <- request
    response := <-request.Reply
    if response == nil {
        return []string{}
    }

    return response.([]string)
}

// getFullConsumerStatus asks the evaluator for the complete status of a
// consumer group, including all partitions (ShowAll).
func getFullConsumerStatus(app *protocol.ApplicationContext, cluster, consumer string) *protocol.ConsumerGroupStatus {
    request := &protocol.EvaluatorRequest{
        Cluster: cluster,
        Group:   consumer,
        ShowAll: true,
        Reply:   make(chan *protocol.ConsumerGroupStatus),
    }
    app.EvaluatorChannel <- request
    response := <-request.Reply
    return response
}

// listTopics fetches the topics known for a single cluster.
func listTopics(app *protocol.ApplicationContext, cluster string) []string {
    request := &protocol.StorageRequest{
        RequestType: protocol.StorageFetchTopics,
        Cluster:     cluster,
        Reply:       make(chan interface{}),
    }
    app.StorageChannel <- request
    response := <-request.Reply
    if response == nil {
        return []string{}
    }

    return response.([]string)
}

// getTopicDetail fetches the latest offset for each partition of a topic,
// indexed by partition number.
func getTopicDetail(app *protocol.ApplicationContext, cluster, topic string) []int64 {
    request := &protocol.StorageRequest{
        RequestType: protocol.StorageFetchTopic,
        Cluster:     cluster,
        Topic:       topic,
        Reply:       make(chan interface{}),
    }
    app.StorageChannel <- request
    response := <-request.Reply
    if response == nil {
        return []int64{}
    }

    return response.([]int64)
}
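A minimal wiring sketch for the handler above; the endpoint path and use of a plain ServeMux are assumptions, since Burrow's actual HTTP server setup is not part of this diff:

// Hedged sketch: exposing the exporter on a /metrics endpoint.
mux := http.NewServeMux()
mux.Handle("/metrics", hc.handlePrometheusMetrics())

Note that every scrape re-walks the storage and evaluator state before rendering, so gauge values are only as fresh as the scrape interval.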