diff --git a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
index b2466b83791e1..685b4a4d1a6eb 100644
--- a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
+++ b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
@@ -5,7 +5,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/dskit/server"
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser.go b/clients/pkg/promtail/targets/azureeventhubs/parser.go
index 659f1a2e7a643..3c6a24750ba5c 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
index 662dce4358790..e41fb0219cb4b 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/stretchr/testify/assert"
diff --git a/clients/pkg/promtail/targets/azureeventhubs/target_syncer.go b/clients/pkg/promtail/targets/azureeventhubs/target_syncer.go
index bc2175768f460..b9ceedea9d9f6 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/target_syncer.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/target_syncer.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
diff --git a/clients/pkg/promtail/targets/azureeventhubs/target_syncer_test.go b/clients/pkg/promtail/targets/azureeventhubs/target_syncer_test.go
index 1874453cf364b..e1e1e2c79ef12 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/target_syncer_test.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/target_syncer_test.go
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
diff --git a/clients/pkg/promtail/targets/kafka/consumer.go b/clients/pkg/promtail/targets/kafka/consumer.go
index f4b8a4d260cf2..2d4412eab4dd2 100644
--- a/clients/pkg/promtail/targets/kafka/consumer.go
+++ b/clients/pkg/promtail/targets/kafka/consumer.go
@@ -6,7 +6,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/grafana/dskit/backoff"
diff --git a/clients/pkg/promtail/targets/kafka/consumer_test.go b/clients/pkg/promtail/targets/kafka/consumer_test.go
index d6ef82ba9addf..5fc9b453a845b 100644
--- a/clients/pkg/promtail/targets/kafka/consumer_test.go
+++ b/clients/pkg/promtail/targets/kafka/consumer_test.go
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
diff --git a/clients/pkg/promtail/targets/kafka/parser.go b/clients/pkg/promtail/targets/kafka/parser.go
index 9ad3b7f8271c0..4c9c5f89dcc76 100644
--- a/clients/pkg/promtail/targets/kafka/parser.go
+++ b/clients/pkg/promtail/targets/kafka/parser.go
@@ -1,7 +1,7 @@
 package kafka
 
 import (
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/relabel"
diff --git a/clients/pkg/promtail/targets/kafka/target.go b/clients/pkg/promtail/targets/kafka/target.go
index 707cc01ca1947..d1f06ae63d25a 100644
--- a/clients/pkg/promtail/targets/kafka/target.go
+++ b/clients/pkg/promtail/targets/kafka/target.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/common/model"
diff --git a/clients/pkg/promtail/targets/kafka/target_syncer.go b/clients/pkg/promtail/targets/kafka/target_syncer.go
index 6afcd24ad7832..411bf1680bb5d 100644
--- a/clients/pkg/promtail/targets/kafka/target_syncer.go
+++ b/clients/pkg/promtail/targets/kafka/target_syncer.go
@@ -8,7 +8,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
@@ -77,11 +77,11 @@ func NewSyncerFromScrapeConfig(
 	switch cfg.KafkaConfig.Assignor {
 	case sarama.StickyBalanceStrategyName:
-		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
+		config.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategySticky()
 	case sarama.RoundRobinBalanceStrategyName:
-		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
+		config.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRoundRobin()
 	case sarama.RangeBalanceStrategyName, "":
-		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
+		config.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRange()
 	default:
 		return nil, fmt.Errorf("unrecognized consumer group partition assignor: %s", cfg.KafkaConfig.Assignor)
 	}
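The hunk above is the one substantive code change in this migration: IBM/sarama replaced the package-level `BalanceStrategy*` variables with `NewBalanceStrategy*()` constructor functions. A minimal sketch of the same switch under the new API; the `assignor` value and the fatal fallback are illustrative placeholders, not part of the Promtail code:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// Placeholder assignor name; the *BalanceStrategyName constants
	// ("sticky", "roundrobin", "range") come from the sarama package.
	assignor := sarama.RangeBalanceStrategyName

	switch assignor {
	case sarama.StickyBalanceStrategyName:
		// Formerly: cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
		cfg.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategySticky()
	case sarama.RoundRobinBalanceStrategyName:
		cfg.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRoundRobin()
	case sarama.RangeBalanceStrategyName, "":
		cfg.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRange()
	default:
		log.Fatalf("unrecognized consumer group partition assignor: %s", assignor)
	}
}
```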
diff --git a/clients/pkg/promtail/targets/kafka/target_syncer_test.go b/clients/pkg/promtail/targets/kafka/target_syncer_test.go
index 9a279c2a3670b..98d6e1acc65ce 100644
--- a/clients/pkg/promtail/targets/kafka/target_syncer_test.go
+++ b/clients/pkg/promtail/targets/kafka/target_syncer_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/go-kit/log"
 	"github.com/grafana/dskit/flagext"
 	"github.com/prometheus/client_golang/prometheus"
diff --git a/clients/pkg/promtail/targets/kafka/target_test.go b/clients/pkg/promtail/targets/kafka/target_test.go
index 3ffe4ac69f16b..9a2375238fa00 100644
--- a/clients/pkg/promtail/targets/kafka/target_test.go
+++ b/clients/pkg/promtail/targets/kafka/target_test.go
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/Shopify/sarama"
+	"github.com/IBM/sarama"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/stretchr/testify/require"
diff --git a/go.mod b/go.mod
index c60b93722b961..b57bcd68ffef7 100644
--- a/go.mod
+++ b/go.mod
@@ -12,9 +12,9 @@ require (
 	github.com/Azure/azure-storage-blob-go v0.14.0
 	github.com/Azure/go-autorest/autorest/adal v0.9.24
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
+	github.com/IBM/sarama v1.43.3
 	github.com/Masterminds/sprig/v3 v3.2.3
 	github.com/NYTimes/gziphandler v1.1.1
-	github.com/Shopify/sarama v1.38.1
 	github.com/Workiva/go-datastructures v1.1.5
 	github.com/alicebob/miniredis/v2 v2.30.4
 	github.com/aliyun/aliyun-oss-go-sdk v2.2.10+incompatible
@@ -252,8 +252,8 @@ require (
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dolthub/maphash v0.1.0 // indirect
-	github.com/eapache/go-resiliency v1.3.0 // indirect
-	github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
+	github.com/eapache/go-resiliency v1.7.0 // indirect
+	github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
 	github.com/edsrzf/mmap-go v1.1.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -307,7 +307,7 @@ require (
 	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
 	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
 	github.com/jcmturner/gofork v1.7.6 // indirect
-	github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect
+	github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
 	github.com/jcmturner/rpc/v2 v2.0.3 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index 2f4493d18daec..a3f541f49a703 100644
--- a/go.sum
+++ b/go.sum
@@ -266,6 +266,8 @@ github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1
 github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
 github.com/IBM/ibm-cos-sdk-go v1.11.1 h1:Pye61hmWA4ZVCfOfFLTJBjPka4HIGrLqmpZ2d2KlrCE=
 github.com/IBM/ibm-cos-sdk-go v1.11.1/go.mod h1:d8vET3w8wgmGwCsCVs+0y4V8+1hRNT6+pbpGaEHvSCI=
+github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA=
+github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573 h1:DCPjdUAi+jcGnL7iN+A7uNY8xG584oMRuisYh/VE21E=
 github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
@@ -296,12 +298,7 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ=
 github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
-github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
-github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
-github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc=
-github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
 github.com/Workiva/go-datastructures v1.1.5 h1:5YfhQ4ry7bZc2Mc7R0YZyYwpf5c6t1cEFvdAhd6Mkf4=
@@ -583,11 +580,11 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0=
-github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
+github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA=
+github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM=
-github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
+github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=
+github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
@@ -1265,8 +1262,8 @@ github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVET
 github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
 github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
 github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
-github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8=
-github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
 github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
 github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
 github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
@@ -2150,11 +2147,11 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
 golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
 golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore
similarity index 100%
rename from vendor/github.com/Shopify/sarama/.gitignore
rename to vendor/github.com/IBM/sarama/.gitignore
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml
similarity index 78%
rename from vendor/github.com/Shopify/sarama/.golangci.yml
rename to vendor/github.com/IBM/sarama/.golangci.yml
index 0b419abbfaecc..72e3e4c2448a5 100644
--- a/vendor/github.com/Shopify/sarama/.golangci.yml
+++ b/vendor/github.com/IBM/sarama/.golangci.yml
@@ -19,61 +19,62 @@ linters-settings:
   misspell:
     locale: US
   goimports:
-    local-prefixes: github.com/Shopify/sarama
+    local-prefixes: github.com/IBM/sarama
   gocritic:
     enabled-tags:
       - diagnostic
+      - performance
       # - experimental
       # - opinionated
-      # - performance
       # - style
+    enabled-checks:
+      - importShadow
+      - nestingReduce
+      - stringsCompare
+      # - unnamedResult
+      # - whyNoLint
     disabled-checks:
       - assignOp
       - appendAssign
       - commentedOutCode
+      - hugeParam
       - ifElseChain
       - singleCaseSwitch
      - sloppyReassign
-      - wrapperFunc
   funlen:
     lines: 300
     statements: 300
+  depguard:
+    rules:
+      main:
+        deny:
+          - pkg: "io/ioutil"
+            desc: Use the "io" and "os" packages instead.
+
 linters:
   disable-all: true
   enable:
   - bodyclose
-  - deadcode
   - depguard
   - exportloopref
   - dogsled
   # - dupl
   - errcheck
   - errorlint
   - funlen
   - gochecknoinits
   # - goconst
   - gocritic
   - gocyclo
   - gofmt
   - goimports
   # - golint
   - gosec
   # - gosimple
   - govet
   # - ineffassign
   - misspell
   # - nakedret
   - nilerr
   # - paralleltest
   # - scopelint
   - staticcheck
-  - structcheck
   # - stylecheck
   - typecheck
   - unconvert
   - unused
-  - varcheck
   - whitespace
 
 issues:
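The depguard rule added above encodes the io/ioutil deprecation: since Go 1.16 its functionality lives in the io and os packages. A hedged sketch of the replacement the rule pushes callers toward; the file path is a placeholder:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Formerly: data, err := ioutil.ReadFile("config.yaml")
	// io/ioutil is deprecated since Go 1.16; the depguard rule above rejects it.
	data, err := os.ReadFile("config.yaml") // placeholder path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```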
diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
new file mode 100644
index 0000000000000..1869b8160ed5f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
@@ -0,0 +1,41 @@
+fail_fast: false
+default_install_hook_types: [pre-commit, commit-msg]
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: fix-byte-order-marker
+      - id: mixed-line-ending
+      - id: trailing-whitespace
+  - repo: local
+    hooks:
+      - id: conventional-commit-msg-validation
+        name: commit message conventional validation
+        language: pygrep
+        entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)'
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: commit-msg-needs-to-be-signed-off
+        name: commit message needs to be signed off
+        language: pygrep
+        entry: "^Signed-off-by:"
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: gofmt
+        name: gofmt
+        description: Format files with gofmt.
+        entry: gofmt -l
+        language: golang
+        files: \.go$
+        args: []
+  - repo: https://github.com/gitleaks/gitleaks
+    rev: v8.16.3
+    hooks:
+      - id: gitleaks
+  - repo: https://github.com/golangci/golangci-lint
+    rev: v1.52.2
+    hooks:
+      - id: golangci-lint
diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md
new file mode 100644
index 0000000000000..99abeb3ecbb5c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CHANGELOG.md
@@ -0,0 +1,1760 @@
+# Changelog
+
+## Version 1.42.2 (2024-02-09)
+
+## What's Changed
+
+⚠️ The go.mod directive has been bumped to 1.18 as the minimum version of Go required for the module. This was necessary to continue to receive updates from some of the third party dependencies that Sarama makes use of for compression.
+
+### :tada: New Features / Improvements
+* feat: update go directive to 1.18 by @dnwe in https://github.com/IBM/sarama/pull/2713
+* feat: return KError instead of errors in AlterConfigs and DescribeConfig by @zhuliquan in https://github.com/IBM/sarama/pull/2472
+### :bug: Fixes
+* fix: don't waste time for backoff on member id required error by @lzakharov in https://github.com/IBM/sarama/pull/2759
+* fix: prevent ConsumerGroup.Close infinitely locking by @maqdev in https://github.com/IBM/sarama/pull/2717
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/IBM/sarama/pull/2716
+* chore(deps): bump golang.org/x/sync to v0.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2718
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.18 to 4.1.19 by @dependabot in https://github.com/IBM/sarama/pull/2739
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2748
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2734
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2764
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.19 to 4.1.21 by @dependabot in https://github.com/IBM/sarama/pull/2763
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/exactly_once by @dependabot in https://github.com/IBM/sarama/pull/2749
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/consumergroup by @dependabot in https://github.com/IBM/sarama/pull/2750
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/sasl_scram_client by @dependabot in https://github.com/IBM/sarama/pull/2751
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2752
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/http_server by @dependabot in https://github.com/IBM/sarama/pull/2753
+* chore(deps): bump github.com/eapache/go-resiliency from 1.4.0 to 1.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2745
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/txn_producer by @dependabot in https://github.com/IBM/sarama/pull/2754
+* chore(deps): bump go.opentelemetry.io/otel/sdk from 1.19.0 to 1.22.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2767
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2793
+* chore(deps): bump go.opentelemetry.io/otel/exporters/stdout/stdoutmetric from 0.42.0 to 1.23.1 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2792
+### :wrench: Maintenance
+* fix(examples): housekeeping of code and deps by @dnwe in https://github.com/IBM/sarama/pull/2720
+### :heavy_plus_sign: Other Changes
+* fix(test): retry MockBroker Listen for EADDRINUSE by @dnwe in https://github.com/IBM/sarama/pull/2721
+
+## New Contributors
+* @maqdev made their first contribution in https://github.com/IBM/sarama/pull/2717
+* @zhuliquan made their first contribution in https://github.com/IBM/sarama/pull/2472
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.1...v1.42.2
+
+## Version 1.42.1 (2023-11-07)
+
+## What's Changed
+### :bug: Fixes
+* fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705
+* fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1
+
+## Version 1.42.0 (2023-11-02)
+
+## What's Changed
+### :bug: Fixes
+* Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693
+* Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698
+* fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678
+* Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700
+* fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701
+### :wrench: Maintenance
+* chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685
+* chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692
+### :heavy_plus_sign: Other Changes
+* chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683
+* fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689
+* chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691
+
+## New Contributors
+* @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693
+* @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0
+
+## Version 1.41.3 (2023-10-17)
+
+## What's Changed
+### :bug: Fixes
+* fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663
+* fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661
+* chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671
+### :memo: Documentation
+* fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657
+
+## New Contributors
+* @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657
+* @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3
+
+## Version 1.41.2 (2023-09-12)
+
+## What's Changed
+### :tada: New Features / Improvements
+* perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646
+### :bug: Fixes
+* fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636
+* fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642
+* fix: use least loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641
+
+## New Contributors
+* @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2
+
+## Version 1.41.1 (2023-08-30)
+
+## What's Changed
+### :bug: Fixes
+* fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618
+* fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628
+* fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633
+* fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632
+### :wrench: Maintenance
+* chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620
+### :memo: Documentation
+* fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386
+* chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627
+### :heavy_plus_sign: Other Changes
+* chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1
+
+## Version 1.41.0 (2023-08-21)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+Note: this version of Sarama has had a big overhaul in its adherence to the use of the right Kafka protocol versions for the given Config Version. It has also bumped the default Version set in Config (where one is not supplied) to 2.1.0. This is in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or using it against Azure EventHubs, then you will likely have to change your application code to pin to the appropriate Version.
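A minimal sketch of the pinning the note above recommends; the broker address and the chosen version constant are assumptions for a cluster running Kafka 2.0 (the v1.41.1 fixes above show Event Hubs being forced to V1_0_0_0):

```go
package main

import "github.com/IBM/sarama"

func main() {
	cfg := sarama.NewConfig()

	// From v1.41.0 the default cfg.Version is V2_1_0_0. For brokers
	// older than 2.1 (or Azure Event Hubs), pin the version explicitly.
	cfg.Version = sarama.V2_0_0_0 // assumed broker version for this sketch

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg) // placeholder broker address
	if err != nil {
		panic(err)
	}
	defer client.Close()
}
```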
+
+* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572
+* chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574
+### :tada: New Features / Improvements
+* Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156
+* feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536
+* feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538
+* fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544
+* Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541
+* fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546
+* fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548
+* chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549
+* chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550
+* chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551
+* fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553
+* fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554
+* fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555
+* fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556
+* fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558
+* fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565
+* fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568
+* feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566
+* fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570
+* feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573
+* feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475
+* feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600
+### :bug: Fixes
+* fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528
+* fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532
+* extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533
+* Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545
+* fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563
+* bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560
+* fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569
+* fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594
+* fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595
+* fix(examples): sync exactly_once and consumergroup by @dnwe in https://github.com/IBM/sarama/pull/2614
+* fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616
+* fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586
+* chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588
+* chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598
+### :wrench: Maintenance
+* fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531
+* chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537
+* Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543
+* fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552
+* chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557
+* fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571
+* chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576
+* chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575
+* chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578
+* chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580
+* chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579
+* chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581
+* chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591
+* feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592
+* chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593
+* feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601
+* fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603
+* fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612
+* fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613
+* chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615
+* feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605
+### :memo: Documentation
+* fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479
+* chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599
+
+## New Contributors
+* @gebn made their first contribution in https://github.com/IBM/sarama/pull/2156
+* @prestona made their first contribution in https://github.com/IBM/sarama/pull/2543
+* @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479
+* @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560
+* @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475
+* @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0
+
+## Version 1.40.1 (2023-07-27)
+
+## What's Changed
+### :tada: New Features / Improvements
+* Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484
+* feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457
+### :bug: Fixes
+* Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517
+* fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519
+* Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520
+* fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522
+* fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509
+* chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513
+* chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512
+### :wrench: Maintenance
+* chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496
+* chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504
+* Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514
+### :heavy_plus_sign: Other Changes
+* chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497
+* chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508
+* fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523
+
+## New Contributors
+* @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504
+* @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514
+* @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484
+* @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1
+
+## Version 1.40.0 (2023-07-17)
+
+## What's Changed
+
+Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461
+
+### :rotating_light: Breaking Changes
+
+- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492
+- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494
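This module migration is exactly what the Promtail changes at the top of this diff apply: only the import path changes, and the API surface stays compatible. A hedged sketch of the one-line change a consumer of the library makes (the ClientID value is a placeholder):

```go
package main

// Before v1.40.0:
//   import "github.com/Shopify/sarama"
// After the ownership transfer, only the module path changes:
import "github.com/IBM/sarama"

func main() {
	// Existing code keeps compiling against the same API.
	cfg := sarama.NewConfig()
	cfg.ClientID = "promtail" // placeholder client ID
	_ = cfg
}
```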
+
+### :bug: Fixes
+
+- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427
+- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428
+- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451
+- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447
+- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453
+
+### :package: Dependency updates
+
+- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452
+
+### :wrench: Maintenance
+
+- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434
+- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489
+- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485
+
+## New Contributors
+
+- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452
+- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447
+- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0
+
+## Version 1.38.1 (2023-01-22)
+
+## What's Changed
+### :bug: Fixes
+* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420
+* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410
+* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413
+* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411
+* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412
+* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418
+
+## New Contributors
+* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420
+* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1
+
+## Version 1.38.0 (2023-01-08)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375
+* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388
+* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373
+### :bug: Fixes
+* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389
+* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385
+* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404
+* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387
+* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378
+* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403
+### :wrench: Maintenance
+* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390
+* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406
+
+## New Contributors
+* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385
+* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404
+* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387
+* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0
+
+## Version 1.37.2 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356
+### :heavy_plus_sign: Other Changes
+* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2
+
+## Version 1.37.1 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352
+* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353
+* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355
+
+## New Contributors
+* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1
+
+## Version 1.37.0 (2022-09-28)
+
+## What's Changed
+
+### :rotating_light: Breaking Changes
+* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward; unfortunately, due to an oversight, this wasn't reflected in the go.mod declaration at time of release.
+
+### :tada: New Features / Improvements
+* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339
+* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295
+* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328
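The multiple-balance-strategies feature in the first bullet of the list above is why the older single `Rebalance.Strategy` field became deprecated (and why the v1.37.1 compatibility fixes exist). A sketch against the v1.43.3 API vendored in this PR; the exact field name `GroupStrategies` is an assumption based on this changelog entry:

```go
package main

import "github.com/IBM/sarama"

func main() {
	cfg := sarama.NewConfig()

	// GroupStrategies (assumed, added in v1.37.0) supersedes the single
	// Rebalance.Strategy field; the member advertises every listed
	// strategy during the group handshake, and the group settles on one
	// supported by all members.
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.NewBalanceStrategySticky(),
		sarama.NewBalanceStrategyRange(),
	}
}
```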
+### :bug: Fixes
+* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329
+* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317
+* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340
+* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331
+* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345
+* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327
+### :package: Dependency updates
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335
+* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333
+* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334
+* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348
+* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336
+* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350
+### :wrench: Maintenance
+* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346
+* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347
+
+## New Contributors
+* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329
+* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317
+* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328
+* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340
+* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0
+
+## Version 1.36.0 (2022-08-11)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252
+* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315
+* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299
+### :bug: Fixes
+* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304
+* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301
+* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311
+* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307
+* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313
+* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305
+* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303
+### :wrench: Maintenance
+* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300
+### :heavy_plus_sign: Other Changes
+* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294
+* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297
+* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312
+
+## New Contributors
+* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294
+* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252
+* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0
+
+## Version 1.35.0 (2022-07-22)
+
+## What's Changed
+### :bug: Fixes
+* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256
+* fix(balance): sort and de-deplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285
+* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269
+* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292
+### :package: Dependency updates
+* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284
+* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283
+* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281
+* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280
+### :wrench: Maintenance
+* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272
+* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288
+* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289
+### :heavy_plus_sign: Other Changes
+* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286
+* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287
+* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291
+
+## New Contributors
+* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0
+
+## Version 1.34.1 (2022-06-07)
+
+## What's Changed
+### :bug: Fixes
+* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240
+* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247
+* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248
+* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245
+### :wrench: Maintenance
+* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236
+* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242
+
+## New Contributors
+* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240
+* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1
+
+## Version 1.34.0 (2022-05-30)
+
+## What's Changed
+### :tada: New Features / Improvements
+* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230
+### :bug: Fixes
+* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234
+### :wrench: Maintenance
+* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231
+* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232
+
+## New Contributors
+* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0
+
+## Version 1.33.0 (2022-05-11)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+**Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather than ==) when forming conditionals on errors returned by this library.**
+* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131
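A brief sketch of the comparison style this change requires; the broker address here is a placeholder:

```go
package main

import (
	"errors"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// Placeholder broker list; NewClient fails if none are reachable.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		// Since v1.33.0, ErrOutOfBrokers wraps the underlying cause, so
		// err == sarama.ErrOutOfBrokers no longer matches; use errors.Is.
		if errors.Is(err, sarama.ErrOutOfBrokers) {
			log.Fatalf("no reachable brokers: %v", err)
		}
		log.Fatal(err)
	}
	defer client.Close()
}
```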
+
+
+### :tada: New Features / Improvements
+* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172
+* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197
+* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191
+### :bug: Fixes
+* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154
+* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144
+* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109
+* fix: remove "Is your cluster reachable?" from msg by @dnwe in https://github.com/IBM/sarama/pull/2165
+* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166
+* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164
+* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171
+* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185
+* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182
+* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203
+* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204
+* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208
+* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205
+* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194
+* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178
+* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213
+* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214
+* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227
+### :package: Dependency updates
+* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170
+### :wrench: Maintenance
+* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161
+* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162
+* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183
+* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210
+### :heavy_plus_sign: Other Changes
+* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200
+* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226
+
+## New Contributors
+* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154
+* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171
+* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185
+* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172
+* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182
+* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178
+* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0
+
+## Version 1.32.0 (2022-02-24)
+
+### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used.
+
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+
+---
+
+## What's Changed
+### :bug: Fixes
+* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133
+### :package: Dependency updates
+* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159
+### :wrench: Maintenance
+* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130
+* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939
+* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138
+### :heavy_plus_sign: Other Changes
+* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145
+* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146
+* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147
+* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0
+
+## Version 1.31.1 (2022-02-01)
+
+- #2126 - @bai - Populate missing kafka versions
+- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image
+- #2123 - @bai - Update klauspost/compress to 0.14
+- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy
+- #2119 - @bai - Add Kafka 3.1.0 version number
+- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption
+- #2051 - @seveas - Expose the TLS connection state of a broker connection
+- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys
+- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup
+- #2113 - @mosceo - Fix typo
+
+## Version 1.31.0 (2022-01-18)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088
+* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686
+* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094
+### :bug: Fixes
+* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080
+* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081
+* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082
+* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096
+* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107
+* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108
+* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078
+* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111
+### :wrench: Maintenance
+* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100
+### :memo: Documentation
+* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099
+### :heavy_plus_sign: Other Changes
+* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084
Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
+
+**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076
+
+---
+
+ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x**
+
+---
+
+### New Features / Improvements
+
+- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh
+- #2000 - @matzew - Using xdg-go module for SCRAM
+- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures
+- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM
+- #2006 - @faillefer - Add support for DeleteOffsets operation
+- #1909 - @agriffaut - KIP-546 Client quota APIs
+- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state
+- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger
+- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log
+- #2019 - @dnwe - feat: add logging & a metric for producer throttle
+- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface
+- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol
+- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open
+- #2034 - @bai - Add support for kafka 3.0.0
+
+### Fixes
+
+- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest
+- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation
+- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls
+- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true
+- #2007 - @bai - Add support for Go 1.17
+- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks
+- #2010 - @dnwe - chore: enable exportloopref and misspell linters
+- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements
+- #2015 - @bai - Change default branch to main
+- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID()
+- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0
+- #2016 - @dnwe - chore: replace deprecated Go calls
+- #2017 - @dnwe - chore: delete legacy vagrant script
+- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test
+- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5
+- #2033 - @bai - Update dependencies
+- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method
+- #2035 - @dnwe - chore: populate the missing kafka versions
+- #2038 - @dnwe - feat: add a fuzzing workflow to github actions
+
+## New Contributors
+* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983
+* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990
+* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988
+* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001
+* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003
+* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973
+* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992
+* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006
+* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718
+* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0
+
+## Version 1.29.1 (2021-06-24)
+
+### New Features / Improvements
+
+- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API
+- #1964 - @ajanikow - Add DelegationToken ResourceType
+
+### Fixes
+
+- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire
+- #1971 - @KerryJava - fix kafka-producer-performance throughput panic
+- #1968 - @dnwe - chore: bump golang.org/x versions
+- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers
+- #1963 - @dnwe - fix: ensure backoff timer is re-used
+- #1949 - @dnwe - fix: explicitly use uint64 for payload length
+
+## Version 1.29.0 (2021-05-07)
+
+### New Features / Improvements
+
+- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API
+- #1869 - @wyndhblb - zstd: encode+decode performance improvements
+- #1541 - @izolight - add String, (Un)MarshalText for acl types.
+- #1921 - @bai - Add support for Kafka 2.8.0
+
+### Fixes
+- #1936 - @dnwe - fix(consumer): follow preferred broker
+- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response)
+- #1926 - @dnwe - fix: correct initial CodeQL findings
+- #1925 - @bai - Test out CodeQL
+- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos
+- #1922 - @bai - Update go dependencies
+- #1898 - @mmaslankaprv - Parsing only known control batches value
+- #1887 - @withshubh - Fix: issues affecting code quality
+
+## Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+## Version 1.27.2 (2020-10-21)
+
+### Improvements
+
+#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
+
+### Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+## Version 1.27.1 (2020-10-07)
+
+### Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+### Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+## Version 1.27.0 (2020-08-11)
+
+### Improvements
+
+#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+### Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs - Fix isBalanced Function Signature
+
+## Version 1.26.4 (2020-05-19)
+
+### Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+## Version 1.26.3 (2020-05-07)
+
+### Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+## Version 1.26.2 (2020-05-06)
+
+### ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable; please use v1.26.4.
+
+### Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+### Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over SOCKS proxy.
+ +## Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) + +## Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/IBM/sarama/pull/1574), + [1582](https://github.com/IBM/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/IBM/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/IBM/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/IBM/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/IBM/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/IBM/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/IBM/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/IBM/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/IBM/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/IBM/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/IBM/sarama/pull/1586)). + +## Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/IBM/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/IBM/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/IBM/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/IBM/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/IBM/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/IBM/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/IBM/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/IBM/sarama/pull/1545)). + +## Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/IBM/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/IBM/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/IBM/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/IBM/sarama/pull/1529)). + +## Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/IBM/sarama/pull/1416)). 
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/IBM/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/IBM/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/IBM/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/IBM/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/IBM/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/IBM/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/IBM/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/IBM/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/IBM/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/IBM/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/IBM/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/IBM/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/IBM/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/IBM/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/IBM/sarama/issues/1252
+
+## Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete record bug
+  ([1425](https://github.com/IBM/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/IBM/sarama/pull/1428)).
+
+## Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/IBM/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/IBM/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/IBM/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/IBM/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/IBM/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/IBM/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/IBM/sarama/pull/1404)).
+- Add support for error codes 77–81
+  ([1397](https://github.com/IBM/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/IBM/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/IBM/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/IBM/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/IBM/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/IBM/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/IBM/sarama/pull/1368)).
+
+## Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/IBM/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/IBM/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/IBM/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/IBM/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/IBM/sarama/pull/1344)).
+
+## Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/IBM/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/IBM/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/IBM/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/IBM/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/IBM/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/IBM/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/IBM/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/IBM/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/IBM/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/IBM/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/IBM/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/IBM/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/IBM/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/IBM/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/IBM/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/IBM/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/IBM/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/IBM/sarama/pull/1156)).
+
+## Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/IBM/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/IBM/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/IBM/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/IBM/sarama/pull/1282)).
+- Add error codes 73–76
+  ([1239](https://github.com/IBM/sarama/pull/1239)).
+- Add retry backoff function
+  ([1160](https://github.com/IBM/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/IBM/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/IBM/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/IBM/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+  ([1258](https://github.com/IBM/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/IBM/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/IBM/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/IBM/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/IBM/sarama/pull/1273)).
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/IBM/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/IBM/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/IBM/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/IBM/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/IBM/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/IBM/sarama/pull/1125)).
+
+## Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/IBM/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/IBM/sarama/pull/1047),
+   [#1069](https://github.com/IBM/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/IBM/sarama/pull/1098)).
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/IBM/sarama/pull/1065),
+   [#1096](https://github.com/IBM/sarama/pull/1096),
+   [#1027](https://github.com/IBM/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/IBM/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/IBM/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/IBM/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/IBM/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/IBM/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/IBM/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/IBM/sarama/pull/1050),
+   [#1051](https://github.com/IBM/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/IBM/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/IBM/sarama/pull/1092)).
+
+## Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/IBM/sarama/pull/1007),
+   [#1008](https://github.com/IBM/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/IBM/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/IBM/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/IBM/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/IBM/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/IBM/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/IBM/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/IBM/sarama/pull/1002), + [#1015](https://github.com/IBM/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/IBM/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/IBM/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/IBM/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/IBM/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/IBM/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/IBM/sarama/pull/1035)). + +## Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/IBM/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/IBM/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/IBM/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/IBM/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/IBM/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/IBM/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/IBM/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/IBM/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/IBM/sarama/pull/991)). + +## Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/IBM/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/IBM/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/IBM/sarama/pull/975)). + +## Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/IBM/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/IBM/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/IBM/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/IBM/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/IBM/sarama/issues/885)). 
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/IBM/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/IBM/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/IBM/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/IBM/sarama/pull/940)). + +## Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/IBM/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/IBM/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/IBM/sarama/pull/837), + [#841](https://github.com/IBM/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/IBM/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/IBM/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/IBM/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/IBM/sarama/pull/859)). + +## Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/IBM/sarama/pull/701), + [#746](https://github.com/IBM/sarama/pull/746), + [#766](https://github.com/IBM/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/IBM/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/IBM/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/IBM/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/IBM/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/IBM/sarama/pull/756)). 
+ - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/IBM/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/IBM/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/IBM/sarama/pull/795)). + +## Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/IBM/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/IBM/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/IBM/sarama/pull/730), + [#733](https://github.com/IBM/sarama/pull/733), + [#734](https://github.com/IBM/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/IBM/sarama/pull/735)). + +## Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and +[#713](https://github.com/IBM/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/IBM/sarama/pull/672), + [#678](https://github.com/IBM/sarama/pull/678), + [#681](https://github.com/IBM/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/IBM/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/IBM/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/IBM/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/IBM/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/IBM/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/IBM/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/IBM/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/IBM/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/IBM/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/IBM/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/IBM/sarama/pull/709)). 
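+
+For illustration, a minimal sketch of both configuration requirements called out in the 1.11 and 1.10 notes above (the broker address and the `V0_10_0_0` version constant here are placeholder values, not a recommendation):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// Since Sarama 1.10: declare the Kafka version you are running against.
+	config.Version = sarama.V0_10_0_0
+	// Since Sarama 1.11: must be true in order to use the SyncProducer.
+	config.Producer.Return.Successes = true
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+}
+```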
+ +## Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/IBM/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/IBM/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/IBM/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/IBM/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/IBM/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/IBM/sarama/pull/605), + [#621](https://github.com/IBM/sarama/pull/621), + [#654](https://github.com/IBM/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/IBM/sarama/pull/658)). + +## Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/IBM/sarama/pull/586), + [#588](https://github.com/IBM/sarama/pull/588), + [#590](https://github.com/IBM/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/IBM/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/IBM/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/IBM/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/IBM/sarama/pull/589)). + +## Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/IBM/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/IBM/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/IBM/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/IBM/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/IBM/sarama/pull/549), + [#550](https://github.com/IBM/sarama/pull/550), + [#551](https://github.com/IBM/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/IBM/sarama/pull/553)). + +## Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/IBM/sarama/pull/449)). + +## Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/IBM/sarama/pull/461)). 
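+
+For illustration, a minimal sketch of driving the offset manager directly (the group name, topic, and broker address are placeholders, and the calls shown reflect the present-day API surface rather than the exact v1.6.0 one):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer client.Close()
+
+	// Track committed offsets for one consumer group.
+	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer om.Close()
+
+	pom, err := om.ManagePartition("example-topic", 0)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer pom.Close()
+
+	offset, _ := pom.NextOffset() // where this group left off on the partition
+	log.Println("next offset to consume:", offset)
+	// After processing a message at offset m, commit one greater:
+	//   pom.MarkOffset(m+1, "")
+}
+```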
+ +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/IBM/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), + [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways + ([#528](https://github.com/IBM/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/IBM/sarama/pull/529)). + +## Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/IBM/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/IBM/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/IBM/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/IBM/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/IBM/sarama/pull/475)). + +## Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/IBM/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). + +## Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/IBM/sarama/pull/456)). + +## Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/IBM/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/IBM/sarama/pull/450), + [#451](https://github.com/IBM/sarama/pull/451)). + +## Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/IBM/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/IBM/sarama/pull/439), + [#442](https://github.com/IBM/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/IBM/sarama/pull/429)). 
+ - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/IBM/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/IBM/sarama/pull/325)). + +## Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/IBM/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/IBM/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/IBM/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/IBM/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/IBM/sarama/pull/422)). + +## Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/IBM/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/IBM/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/IBM/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/IBM/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/IBM/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/IBM/sarama/pull/390), + [#400](https://github.com/IBM/sarama/pull/400)). + +## Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/IBM/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/IBM/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/IBM/sarama/pull/369)). 
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000..8470ec5ce943b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+  overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+ +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +dominic.evans@uk.ibm.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
new file mode 100644
index 0000000000000..bb88127c0e911
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+[fork]: https://github.com/IBM/sarama/fork
+[pr]: https://github.com/IBM/sarama/compare
+[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license
+
+Hi there! We are thrilled that you would like to contribute to Sarama.
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+## Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please check whether the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+## Contributing a change
+
+Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md).
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes.
+
+Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author:
+
+```
+feat: this is my commit message
+
+Signed-off-by: Random J Developer
+```
+
+Git even has a `-s` command line option to append this automatically to your
+commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+Because this library is in production use by many people and applications, we code review all additions.
+To make the review process go as smoothly as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support.
+ You can rely on GitHub Actions for testing older Go versions. + +## Submitting a pull request + +0. [Fork][fork] and clone the repository +1. Create a new branch: `git checkout -b my-branch-name` +2. Make your change, push to your fork and [submit a pull request][pr] +3. Wait for your pull request to be reviewed and merged. + +Here are a few things you can do that will increase the likelihood of your pull request being accepted: + +- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. +- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + +## Further Reading + +- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) +- [The most powerful contributor agreement](https://lwn.net/Articles/592503/) +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka new file mode 100644 index 0000000000000..40f5f333b5f1e --- /dev/null +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -0,0 +1,47 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:de2a0a20c1c3b39c3de829196de9694d09f97cd18fda1004de855ed2b4c841ba + +USER root + +RUN microdnf update -y \ + && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf reinstall -y tzdata \ + && microdnf clean all + +ENV JAVA_HOME=/usr/lib/jvm/jre-11 + +# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html +# Ensure Java doesn't cache any dns results +RUN cd /etc/java/java-11-openjdk/*/conf/security \ + && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ + && echo 'networkaddress.cache.ttl=0' >> java.security \ + && echo 'networkaddress.cache.negative.ttl=0' >> java.security + +ARG SCALA_VERSION="2.13" +ARG KAFKA_VERSION="3.6.0" + +# https://github.com/apache/kafka/blob/9989b68d0d38c8f1357f78bf9d53a58c1476188d/tests/docker/Dockerfile#L46-L72 +ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ + && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ + && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" + +# older kafka versions depend upon jaxb-api being bundled with the JDK, but it +# was removed from Java 11 so work around that by including it in the kafka +# libs dir regardless +WORKDIR /tmp +RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ + && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ + && rm -f jaxb-api-2.3.0.jar + +WORKDIR /opt/kafka-${KAFKA_VERSION} + +ENV JAVA_MAJOR_VERSION=11 + +RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh + +COPY entrypoint.sh / + +USER 65534:65534 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/IBM/sarama/LICENSE.md similarity index 95% rename from vendor/github.com/Shopify/sarama/LICENSE 
rename to vendor/github.com/IBM/sarama/LICENSE.md index d2bf4352f4c94..f8f64d4173a2d 100644 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ b/vendor/github.com/IBM/sarama/LICENSE.md @@ -1,5 +1,9 @@ +# MIT License + Copyright (c) 2013 Shopify +Copyright (c) 2023 IBM Corporation + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile similarity index 100% rename from vendor/github.com/Shopify/sarama/Makefile rename to vendor/github.com/IBM/sarama/Makefile diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/IBM/sarama/README.md similarity index 63% rename from vendor/github.com/Shopify/sarama/README.md rename to vendor/github.com/IBM/sarama/README.md index 0ee6e6a7f68ca..4534d7b41d717 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/IBM/sarama/README.md @@ -1,18 +1,19 @@ # sarama -[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/IBM/sarama/badge?style=flat)](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996/badge)](https://www.bestpractices.dev/projects/7996) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). +You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability @@ -20,14 +21,15 @@ Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. -Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +Sarama follows semantic versioning and provides API stability via the standard Go +[module version numbering](https://go.dev/doc/modules/version-numbers) scheme. + A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). 
-- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! diff --git a/vendor/github.com/IBM/sarama/SECURITY.md b/vendor/github.com/IBM/sarama/SECURITY.md new file mode 100644 index 0000000000000..b2f6e61fe7521 --- /dev/null +++ b/vendor/github.com/IBM/sarama/SECURITY.md @@ -0,0 +1,11 @@ +# Security + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new). + +See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions. + +Alternatively, you can report them via e-mail or anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages. diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile similarity index 100% rename from vendor/github.com/Shopify/sarama/Vagrantfile rename to vendor/github.com/IBM/sarama/Vagrantfile diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_bindings.go rename to vendor/github.com/IBM/sarama/acl_bindings.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/acl_create_request.go rename to vendor/github.com/IBM/sarama/acl_create_request.go index 449102f74a7cb..e581c984a9032 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/IBM/sarama/acl_create_request.go @@ -51,6 +51,10 @@ func (c *CreateAclsRequest) headerVersion() int16 { return 1 } +func (c *CreateAclsRequest) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 1 +} + func (c *CreateAclsRequest) requiredVersion() KafkaVersion { switch c.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/acl_create_response.go rename to vendor/github.com/IBM/sarama/acl_create_response.go index 21d6c340cc5d6..d123ba86316a0 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/IBM/sarama/acl_create_response.go @@ -4,6 +4,7 @@ import "time" // CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { + Version int16 ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse } @@ -52,15 +53,28 @@ func (c *CreateAclsResponse) key() int16 { } func (c 
*CreateAclsResponse) version() int16 { - return 0 + return c.Version } func (c *CreateAclsResponse) headerVersion() int16 { return 0 } +func (c *CreateAclsResponse) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 1 +} + func (c *CreateAclsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch c.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *CreateAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime } // AclCreationResponse is an acl creation response type diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/acl_delete_request.go rename to vendor/github.com/IBM/sarama/acl_delete_request.go index 5e5c03bc2da29..abeb4425e75d9 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/IBM/sarama/acl_delete_request.go @@ -52,6 +52,10 @@ func (d *DeleteAclsRequest) headerVersion() int16 { return 1 } +func (d *DeleteAclsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/acl_delete_response.go rename to vendor/github.com/IBM/sarama/acl_delete_response.go index cd33749d5e564..2e2850b32ad1a 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/IBM/sarama/acl_delete_response.go @@ -60,8 +60,21 @@ func (d *DeleteAclsResponse) headerVersion() int16 { return 0 } +func (d *DeleteAclsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *DeleteAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime } // FilterResponse is a filter response type diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go similarity index 82% rename from vendor/github.com/Shopify/sarama/acl_describe_request.go rename to vendor/github.com/IBM/sarama/acl_describe_request.go index e0fe9023a28b4..7d65bef14b9bb 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/IBM/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -// DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter @@ -29,6 +29,10 @@ func (d *DescribeAclsRequest) headerVersion() int16 { return 1 } +func (d *DescribeAclsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go similarity index 90% rename from vendor/github.com/Shopify/sarama/acl_describe_response.go rename to vendor/github.com/IBM/sarama/acl_describe_response.go index 3255fd48571a8..f89a53b6624b1 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ b/vendor/github.com/IBM/sarama/acl_describe_response.go @@ -81,6 +81,10 @@ func 
(d *DescribeAclsResponse) headerVersion() int16 { return 0 } +func (d *DescribeAclsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: @@ -89,3 +93,7 @@ func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } } + +func (r *DescribeAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_filter.go rename to vendor/github.com/IBM/sarama/acl_filter.go diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_types.go rename to vendor/github.com/IBM/sarama/acl_types.go index c3ba8ddcf6448..62bb5342ae0f1 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/IBM/sarama/acl_types.go @@ -60,7 +60,7 @@ func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +// UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ @@ -114,7 +114,7 @@ func (a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ @@ -166,7 +166,7 @@ func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ @@ -217,7 +217,7 @@ func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go similarity index 80% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go index a96af9341788e..6d3df9bedce78 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go @@ -2,6 +2,7 @@ package sarama // AddOffsetsToTxnRequest adds offsets to a transaction request type 
AddOffsetsToTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 @@ -45,13 +46,26 @@ func (a *AddOffsetsToTxnRequest) key() int16 { } func (a *AddOffsetsToTxnRequest) version() int16 { - return 0 + return a.Version } func (a *AddOffsetsToTxnRequest) headerVersion() int16 { return 1 } +func (a *AddOffsetsToTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_7_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go similarity index 72% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go index bb61973d16b6c..136460508a511 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go @@ -6,6 +6,7 @@ import ( // AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { + Version int16 ThrottleTime time.Duration Err KError } @@ -37,13 +38,30 @@ func (a *AddOffsetsToTxnResponse) key() int16 { } func (a *AddOffsetsToTxnResponse) version() int16 { - return 0 + return a.Version } func (a *AddOffsetsToTxnResponse) headerVersion() int16 { return 0 } +func (a *AddOffsetsToTxnResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_7_0_0 + } +} + +func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go similarity index 83% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go @@ -1,7 +1,8 @@ package sarama -// AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is an add partition request type AddPartitionsToTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 @@ -69,13 +70,24 @@ func (a *AddPartitionsToTxnRequest) key() int16 { } func (a *AddPartitionsToTxnRequest) version() int16 { - return 0 + return a.Version } func (a *AddPartitionsToTxnRequest) headerVersion() int16 { return 1 } +func (a *AddPartitionsToTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go similarity index 85% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go rename to
vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go index 0989565076764..8ef0a2a2c4a31 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go @@ -6,6 +6,7 @@ import ( // AddPartitionsToTxnResponse is a partition errors to transaction type type AddPartitionsToTxnResponse struct { + Version int16 ThrottleTime time.Duration Errors map[string][]*PartitionError } @@ -34,6 +35,7 @@ func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { } func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + a.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -76,15 +78,30 @@ func (a *AddPartitionsToTxnResponse) key() int16 { } func (a *AddPartitionsToTxnResponse) version() int16 { - return 0 + return a.Version } func (a *AddPartitionsToTxnResponse) headerVersion() int16 { return 0 } +func (a *AddPartitionsToTxnResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } // PartitionError is a partition error type diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go similarity index 87% rename from vendor/github.com/Shopify/sarama/admin.go rename to vendor/github.com/IBM/sarama/admin.go index a334daff553ff..dcf1d7659cc4c 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -196,9 +196,9 @@ func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } -// isErrNoController returns `true` if the given error type unwraps to an +// isErrNotController returns `true` if the given error type unwraps to an // `ErrNotController` response from Kafka -func isErrNoController(err error) bool { +func isErrNotController(err error) bool { return errors.Is(err, ErrNotController) } @@ -207,19 +207,17 @@ func isErrNoController(err error) bool { // provided retryable func) up to the maximum number of tries permitted by // the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { - var err error - for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { - err = fn() - if err == nil || !retryable(err) { + for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { + err := fn() + attemptsRemaining-- + if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... 
(%d attempts remaining)\n", - ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) - continue } - return err } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { @@ -240,14 +238,18 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO Timeout: ca.conf.Admin.Timeout, } - if ca.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 1 - } - if ca.conf.Version.IsAtLeast(V1_0_0_0) { + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 3 is the same as version 2 (brokers response before throttling) + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 2 is the same as version 1 (response has ThrottleTime) request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_10_2_0) { + // Version 1 adds validateOnly. + request.Version = 1 } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -275,13 +277,19 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, err - } - - request := NewMetadataRequest(ca.conf.Version, topics) - response, err := controller.GetMetadata(request) + var response *MetadataResponse + err = ca.retryOnError(isErrNotController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } + request := NewMetadataRequest(ca.conf.Version, topics) + response, err = controller.GetMetadata(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, err } @@ -289,13 +297,20 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, int32(0), err - } + var response *MetadataResponse + err = ca.retryOnError(isErrNotController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } - request := NewMetadataRequest(ca.conf.Version, nil) - response, err := controller.GetMetadata(request) + request := NewMetadataRequest(ca.conf.Version, nil) + response, err = controller.GetMetadata(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, int32(0), err } @@ -389,6 +404,7 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { topicDetails.ConfigEntries = make(map[string]*string) for _, entry := range resource.Configs { + entry := entry // only include non-default non-sensitive config // (don't actually think topic config will ever be sensitive) if entry.Default || entry.Sensitive { @@ -413,11 +429,16 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { Timeout: ca.conf.Admin.Timeout, } - if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Versions 0, 1, 2, and 3 are the same. 
+ if ca.conf.Version.IsAtLeast(V2_1_0_0) { + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -457,8 +478,11 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ Timeout: ca.conf.Admin.Timeout, ValidateOnly: validateOnly, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -499,7 +523,7 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ request.AddBlock(topic, int32(i), assignment[i]) } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -545,13 +569,20 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) - b, err := ca.Controller() - if err != nil { - return nil, err - } - _ = b.Open(ca.client.Config()) + var rsp *ListPartitionReassignmentsResponse + err = ca.retryOnError(isErrNotController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) - rsp, err := b.ListPartitionReassignments(request) + rsp, err = b.ListPartitionReassignments(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err == nil && rsp != nil { return rsp.TopicStatus, nil @@ -587,6 +618,9 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i Topics: topics, Timeout: ca.conf.Admin.Timeout, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } rsp, err := broker.DeleteRecords(request) if err != nil { errs = append(errs, err) @@ -666,11 +700,8 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, for _, rspResource := range rsp.Resources { if rspResource.Name == resource.Name { - if rspResource.ErrorMsg != "" { - return nil, errors.New(rspResource.ErrorMsg) - } if rspResource.ErrorCode != 0 { - return nil, KError(rspResource.ErrorCode) + return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg} } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) @@ -692,6 +723,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string Resources: resources, ValidateOnly: validateOnly, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } var ( b *Broker @@ -721,11 +755,8 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string for _, rspResource := range rsp.Resources { if rspResource.Name == name { - if rspResource.ErrorMsg != "" { - return errors.New(rspResource.ErrorMsg) - } if rspResource.ErrorCode != 0 { - return KError(rspResource.ErrorCode) + return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg} } } } @@ -891,8 +922,19 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } - if ca.conf.Version.IsAtLeast(V2_3_0_0) { + + if ca.conf.Version.IsAtLeast(V2_4_0_0) { + // 
Starting in version 4, the response will include group.instance.id info for members. describeReq.Version = 4 + } else if ca.conf.Version.IsAtLeast(V2_3_0_0) { + // Starting in version 3, authorized operations can be requested. + describeReq.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 2 is the same as version 0. + describeReq.Version = 2 + } else if ca.conf.Version.IsAtLeast(V1_1_0_0) { + // Version 1 is the same as version 0. + describeReq.Version = 1 } response, err := broker.DescribeGroups(describeReq) if err != nil { @@ -919,7 +961,22 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened - response, err := b.ListGroups(&ListGroupsRequest{}) + request := &ListGroupsRequest{} + if ca.conf.Version.IsAtLeast(V2_6_0_0) { + // Version 4 adds the StatesFilter field (KIP-518). + request.Version = 4 + } else if ca.conf.Version.IsAtLeast(V2_4_0_0) { + // Version 3 is the first flexible version. + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 2 is the same as version 0. + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + + response, err := b.ListGroups(request) if err != nil { errChan <- err return @@ -955,16 +1012,7 @@ func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions m return nil, err } - request := &OffsetFetchRequest{ - ConsumerGroup: group, - partitions: topicPartitions, - } - - if ca.conf.Version.IsAtLeast(V0_10_2_0) { - request.Version = 2 - } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { - request.Version = 1 - } + request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions) return coordinator.FetchOffset(request) } @@ -1006,6 +1054,9 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { request := &DeleteGroupsRequest{ Groups: []string{group}, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } resp, err := coordinator.DeleteGroups(request) if err != nil { @@ -1043,7 +1094,11 @@ func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32 defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened - response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + request := &DescribeLogDirsRequest{} + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + response, err := b.DescribeLogDirs(request) if err != nil { errChan <- err return @@ -1114,12 +1169,16 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU Upsertions: u, } - b, err := ca.Controller() - if err != nil { - return nil, err - } + var rsp *AlterUserScramCredentialsResponse + err := ca.retryOnError(isErrNotController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.AlterUserScramCredentials(req) + rsp, err = b.AlterUserScramCredentials(req) + return err + }) if err != nil { return nil, err } @@ -1190,6 +1249,10 @@ func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op Clie } func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { + if !ca.conf.Version.IsAtLeast(V2_4_0_0) { + return nil, ConfigurationError("Removing members from a consumer group headers requires Kafka version of at least v2.4.0") + } + controller, err := ca.client.Coordinator(groupId) if err != nil { return nil, err diff --git 
a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go similarity index 97% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_request.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_request.go index f528512d024aa..a7fa0cbd139a5 100644 --- a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go +++ b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go @@ -12,6 +12,7 @@ package sarama // validate_only => BOOLEAN type AlterClientQuotasRequest struct { + Version int16 Entries []AlterClientQuotasEntry // The quota configuration entries to alter. ValidateOnly bool // Whether the alteration should be validated, but not performed. } @@ -182,13 +183,17 @@ func (a *AlterClientQuotasRequest) key() int16 { } func (a *AlterClientQuotasRequest) version() int16 { - return 0 + return a.Version } func (a *AlterClientQuotasRequest) headerVersion() int16 { return 1 } +func (a *AlterClientQuotasRequest) isValidVersion() bool { + return a.Version == 0 +} + func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_response.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_response.go index ccd27d5f5ece9..cce997cae2851 100644 --- a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go +++ b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go @@ -14,6 +14,7 @@ import ( // entity_name => NULLABLE_STRING type AlterClientQuotasResponse struct { + Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered. 
} @@ -133,13 +134,21 @@ func (a *AlterClientQuotasResponse) key() int16 { } func (a *AlterClientQuotasResponse) version() int16 { - return 0 + return a.Version } func (a *AlterClientQuotasResponse) headerVersion() int16 { return 0 } +func (a *AlterClientQuotasResponse) isValidVersion() bool { + return a.Version == 0 +} + func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } + +func (r *AlterClientQuotasResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/alter_configs_request.go rename to vendor/github.com/IBM/sarama/alter_configs_request.go index 8b94b1f3fe406..ee1ab64458293 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ b/vendor/github.com/IBM/sarama/alter_configs_request.go @@ -2,6 +2,7 @@ package sarama // AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { + Version int16 Resources []*AlterConfigsResource ValidateOnly bool } @@ -114,13 +115,24 @@ func (a *AlterConfigsRequest) key() int16 { } func (a *AlterConfigsRequest) version() int16 { - return 0 + return a.Version } func (a *AlterConfigsRequest) headerVersion() int16 { return 1 } +func (a *AlterConfigsRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 1 +} + func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go similarity index 78% rename from vendor/github.com/Shopify/sarama/alter_configs_response.go rename to vendor/github.com/IBM/sarama/alter_configs_response.go index 84cd86c72920d..d8b70e3718d11 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/IBM/sarama/alter_configs_response.go @@ -1,13 +1,30 @@ package sarama -import "time" +import ( + "fmt" + "time" +) // AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { + Version int16 ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } +type AlterConfigError struct { + Err KError + ErrMsg string +} + +func (c *AlterConfigError) Error() string { + text := c.Err.Error() + if c.ErrMsg != "" { + text = fmt.Sprintf("%s - %s", text, c.ErrMsg) + } + return text +} + // AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 @@ -100,17 +117,32 @@ func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) e } func (a *AlterConfigsResponse) key() int16 { - return 32 + return 33 } func (a *AlterConfigsResponse) version() int16 { - return 0 + return a.Version } func (a *AlterConfigsResponse) headerVersion() int16 { return 0 } +func (a *AlterConfigsResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 1 +} + func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_0_0_0 + } +} + +func (r *AlterConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go 
b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go index f0a2f9dd59b11..f898f87a20704 100644 --- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go +++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go @@ -113,6 +113,10 @@ func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { return 2 } +func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go similarity index 93% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go index b3f9a15fe7f6e..1ee56b40ee3ca 100644 --- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go +++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type alterPartitionReassignmentsErrorBlock struct { errorCode KError errorMessage *string @@ -152,6 +154,14 @@ func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { return 1 } +func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go similarity index 97% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go index 0530d8946a8fa..f29f164cffb76 100644 --- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go +++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go @@ -137,6 +137,10 @@ func (r *AlterUserScramCredentialsRequest) headerVersion() int16 { return 2 } +func (r *AlterUserScramCredentialsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go index 31e167b5eb79b..75eac0cec159b 100644 --- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go +++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go @@ -89,6 +89,14 @@ func (r *AlterUserScramCredentialsResponse) headerVersion() int16 { return 2 } +func (r *AlterUserScramCredentialsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } + +func 
(r *AlterUserScramCredentialsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go similarity index 89% rename from vendor/github.com/Shopify/sarama/api_versions_request.go rename to vendor/github.com/IBM/sarama/api_versions_request.go index e5b3baf646be9..f94174daf2acb 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/IBM/sarama/api_versions_request.go @@ -57,13 +57,21 @@ func (r *ApiVersionsRequest) headerVersion() int16 { return 1 } +func (r *ApiVersionsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return V0_10_0_0 case 3: return V2_4_0_0 - default: + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: return V0_10_0_0 + default: + return V2_4_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/api_versions_response.go rename to vendor/github.com/IBM/sarama/api_versions_response.go index ade911c597624..457c79a95ba75 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/IBM/sarama/api_versions_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + // ApiVersionsResponseKey contains the APIs supported by the broker. type ApiVersionsResponseKey struct { // Version defines the protocol version to use for encode and decode @@ -144,13 +146,25 @@ func (r *ApiVersionsResponse) headerVersion() int16 { return 0 } +func (r *ApiVersionsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return V0_10_0_0 case 3: return V2_4_0_0 - default: + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: return V0_10_0_0 + default: + return V2_4_0_0 } } + +func (r *ApiVersionsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/async_producer.go rename to vendor/github.com/IBM/sarama/async_producer.go index 50f226f8eb9e1..a6fa3d4a2ec36 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -20,7 +20,6 @@ import ( // leaks and message lost: it will not be garbage-collected automatically when it passes // out of scope and buffered messages may not be flushed. type AsyncProducer interface { - // AsyncClose triggers a shutdown of the producer. The shutdown has completed // when both the Errors and Successes channels have been closed. When calling // AsyncClose, you *must* continue to read from those channels in order to @@ -50,7 +49,7 @@ type AsyncProducer interface { // errors to be returned. Errors() <-chan *ProducerError - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // TxnStatus return current producer transaction status. 
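Taken together, the protocol changes above follow one repeated pattern: each request/response body now carries its negotiated `Version`, reports it via `version()`, bounds-checks it with `isValidVersion()`, and maps it to the minimum broker release in `requiredVersion()`; responses additionally expose a `throttleTime()` accessor that the broker layer consumes. A minimal sketch of that shape, assuming a stand-in `KafkaVersion` type and a hypothetical `fooResponse` in place of the vendored types:

```go
package main

import (
	"fmt"
	"time"
)

// KafkaVersion stands in for sarama's version type; the two constants
// below are illustrative assumptions, not the library's definitions.
type KafkaVersion string

const (
	V0_11_0_0 KafkaVersion = "0.11.0.0"
	V2_0_0_0  KafkaVersion = "2.0.0.0"
)

// fooResponse mirrors the shape each protocol body takes in this diff:
// a Version field plus version, validity, and throttle accessors.
type fooResponse struct {
	Version      int16
	ThrottleTime time.Duration
}

func (r *fooResponse) version() int16 { return r.Version }

// isValidVersion rejects wire versions this body cannot encode or decode.
func (r *fooResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 }

// requiredVersion maps the wire version to the oldest broker release
// that can serve it; an unrecognized (future) version falls through to
// the newest known mapping rather than the oldest.
func (r *fooResponse) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V2_0_0_0
	case 0:
		return V0_11_0_0
	default:
		return V2_0_0_0
	}
}

// throttleTime lets generic broker code observe quota throttling
// without knowing the concrete response type.
func (r *fooResponse) throttleTime() time.Duration { return r.ThrottleTime }

func main() {
	r := &fooResponse{Version: 1, ThrottleTime: 50 * time.Millisecond}
	fmt.Println(r.isValidVersion(), r.requiredVersion(), r.throttleTime())
}
```

The `default` arm returning the newest mapping mirrors the vendored switches (e.g. `default: return V2_7_0_0`), so an unknown version is treated as demanding the newest broker instead of silently claiming V0_11_0_0 support.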
@@ -366,17 +365,17 @@ func (p *asyncProducer) Close() error { }) } - var errors ProducerErrors + var pErrs ProducerErrors if p.conf.Producer.Return.Errors { for event := range p.errors { - errors = append(errors, event) + pErrs = append(pErrs, event) } } else { <-p.errors } - if len(errors) > 0 { - return errors + if len(pErrs) > 0 { + return pErrs } return nil } @@ -450,8 +449,10 @@ func (p *asyncProducer) dispatcher() { p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) continue } - if msg.ByteSize(version) > p.conf.Producer.MaxMessageBytes { - p.returnError(msg, ErrMessageSizeTooLarge) + + size := msg.ByteSize(version) + if size > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes))) continue } @@ -1100,7 +1101,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1133,7 +1134,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go similarity index 94% rename from vendor/github.com/Shopify/sarama/balance_strategy.go rename to vendor/github.com/IBM/sarama/balance_strategy.go index 4594df6f6d64b..30d41779c1e9b 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -57,35 +57,42 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- -// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// NewBalanceStrategyRange returns a range balance strategy, +// which is the default and assigns partitions as ranges to consumer group members. 
// This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // // Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2): // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} -// M2: {T2: [3, 4, 5], T2: [3, 4, 5]} -var BalanceStrategyRange = &balanceStrategy{ - name: RangeBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - partitionsPerConsumer := len(partitions) / len(memberIDs) - consumersWithExtraPartition := len(partitions) % len(memberIDs) - - sort.Strings(memberIDs) - - for i, memberID := range memberIDs { - min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) - extra := 0 - if i < consumersWithExtraPartition { - extra = 1 +// M2: {T1: [3, 4, 5], T2: [3, 4, 5]} +func NewBalanceStrategyRange() BalanceStrategy { + return &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) + + for i, memberID := range memberIDs { + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra + plan.Add(memberID, topic, partitions[min:max]...) } - max := min + partitionsPerConsumer + extra - plan.Add(memberID, topic, partitions[min:max]...) - } - }, + }, + } } -// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// Deprecated: use NewBalanceStrategyRange to avoid data race issue +var BalanceStrategyRange = NewBalanceStrategyRange() + +// NewBalanceStrategySticky returns a sticky balance strategy, +// which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): // @@ -97,13 +104,18 @@ var BalanceStrategyRange = &balanceStrategy{ // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} -var BalanceStrategySticky = &stickyBalanceStrategy{} +func NewBalanceStrategySticky() BalanceStrategy { + return &stickyBalanceStrategy{} +} + +// Deprecated: use NewBalanceStrategySticky to avoid data race issue +var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { - name string coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) + name string } // Name implements BalanceStrategy. @@ -171,10 +183,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state - isFreshAssignment := false - if len(currentAssignment) == 0 { - isFreshAssignment = true - } + isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) @@ -281,10 +290,7 @@ func strsContains(s []string, value string) bool { // Balance assignments across consumers for maximum fairness and stickiness. 
func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { - initializing := false - if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { - initializing = true - } + initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { @@ -337,11 +343,17 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// NewBalanceStrategyRoundRobin returns a round-robin balance strategy, +// which assigns partitions to members in alternating order. // For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] -var BalanceStrategyRoundRobin = new(roundRobinBalancer) +func NewBalanceStrategyRoundRobin() BalanceStrategy { + return new(roundRobinBalancer) +} + +// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue +var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} @@ -414,8 +426,8 @@ func (tp *topicAndPartition) comparedValue() string { } type memberAndTopic struct { - memberID string topics map[string]struct{} + memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { @@ -681,11 +693,8 @@ func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, par } heap.Init(&pq) - for { - // loop until no consumer-group members remain - if pq.Len() == 0 { - break - } + // loop until no consumer-group members remain + for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time @@ -995,20 +1004,21 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur } for _, pair := range pairs { - if pair.SrcMemberID == src { - // create a deep copy of the pairs, excluding the current pair - reducedSet := make([]consumerPair, len(pairs)-1) - i := 0 - for _, p := range pairs { - if p != pair { - reducedSet[i] = pair - i++ - } + if pair.SrcMemberID != src { + continue + } + // create a deep copy of the pairs, excluding the current pair + reducedSet := make([]consumerPair, len(pairs)-1) + i := 0 + for _, p := range pairs { + if p != pair { + reducedSet[i] = pair + i++ } - - currentPath = append(currentPath, pair.SrcMemberID) - return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) } + + currentPath = append(currentPath, pair.SrcMemberID) + return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) } return currentPath, false } @@ -1106,9 +1116,9 @@ type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { - // order asssignment priority queue in descending order using assignment-count/member-id + // order assignment priority queue in descending order using assignment-count/member-id if 
len(pq[i].assignments) == len(pq[j].assignments) { - return strings.Compare(pq[i].id, pq[j].id) > 0 + return pq[i].id > pq[j].id } return len(pq[i].assignments) > len(pq[j].assignments) } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go similarity index 95% rename from vendor/github.com/Shopify/sarama/broker.go rename to vendor/github.com/IBM/sarama/broker.go index d049e9b47cf9d..d0d5b87b8b9c0 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -58,6 +58,9 @@ type Broker struct { kerberosAuthenticator GSSAPIKerberosAuth clientSessionReauthenticationTimeMs int64 + + throttleTimer *time.Timer + throttleTimerLock sync.Mutex } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -175,7 +178,9 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() - b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + if b.metricRegistry == nil { + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + } go withRecover(func() { defer func() { @@ -256,6 +261,7 @@ func (b *Broker) Open(conf *Config) error { b.connErr = b.authenticateViaSASLv1() if b.connErr != nil { close(b.responses) + <-b.done err = b.conn.Close() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) @@ -367,6 +373,7 @@ func (b *Broker) Rack() string { // GetMetadata send a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) + response.Version = request.Version // Required to ensure use of the correct response header version err := b.sendAndReceive(request, response) if err != nil { @@ -429,12 +436,16 @@ type ProduceCallback func(*ProduceResponse, error) // // Make sure not to Close the broker in the callback as it will lead to a deadlock. 
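//
// As an illustrative usage sketch (not part of the vendored
// documentation; `req` is an assumed, already-prepared *ProduceRequest):
//
//	err := b.AsyncProduce(req, func(res *ProduceResponse, err error) {
//		if err != nil {
//			Logger.Printf("async produce failed: %v\n", err)
//			return
//		}
//		// inspect res here, e.g. per-partition errors or res.ThrottleTime
//	})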
func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error { - metricRegistry := b.metricRegistry + b.lock.Lock() + defer b.lock.Unlock() + needAcks := request.RequiredAcks != NoResponse // Use a nil promise when no acks is required var promise *responsePromise if needAcks { + metricRegistry := b.metricRegistry + // Create ProduceResponse early to provide the header version res := new(ProduceResponse) promise = &responsePromise{ @@ -453,15 +464,13 @@ func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error return } - // Wellformed response - b.updateThrottleMetric(res.ThrottleTime) + // Well-formed response + b.handleThrottledResponse(res) cb(res, nil) }, } } - b.lock.Lock() - defer b.lock.Unlock() return b.sendWithPromise(request, promise) } @@ -477,7 +486,6 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) - b.updateThrottleMetric(response.ThrottleTime) } if err != nil { @@ -584,6 +592,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error // ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) + response.Version = request.Version // Required to ensure use of the correct response header version err := b.sendAndReceive(request, response) if err != nil { @@ -942,7 +951,7 @@ func (b *Broker) write(buf []byte) (n int, err error) { return b.conn.Write(buf) } -// b.lock must be haled by caller +// b.lock must be held by caller func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { var promise *responsePromise if promiseResponse { @@ -998,6 +1007,9 @@ func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error { return err } + // check and wait if throttled + b.waitIfThrottled() + requestTime := time.Now() // Will be decremented in responseReceiver (except error or request with NoResponse) b.addRequestInFlightMetrics(1) @@ -1040,7 +1052,14 @@ func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { return nil } - return handleResponsePromise(req, res, promise, b.metricRegistry) + err = handleResponsePromise(req, res, promise, b.metricRegistry) + if err != nil { + return err + } + if res != nil { + b.handleThrottledResponse(res) + } + return nil } func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error { @@ -1058,7 +1077,12 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } - host, err := pd.getString() + var host string + if version < 9 { + host, err = pd.getString() + } else { + host, err = pd.getCompactString() + } if err != nil { return err } @@ -1068,11 +1092,13 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } - if version >= 1 { + if version >= 1 && version < 9 { b.rack, err = pd.getNullableString() - if err != nil { - return err - } + } else if version >= 9 { + b.rack, err = pd.getCompactNullableString() + } + if err != nil { + return err } b.addr = net.JoinHostPort(host, fmt.Sprint(port)) @@ -1080,6 +1106,13 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } + if version >= 9 { + _, err := pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + return nil } @@ -1096,7 +1129,11 @@ func (b 
*Broker) encode(pe packetEncoder, version int16) (err error) { pe.putInt32(b.id) - err = pe.putString(host) + if version < 9 { + err = pe.putString(host) + } else { + err = pe.putCompactString(host) + } if err != nil { return err } @@ -1104,12 +1141,20 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { pe.putInt32(int32(port)) if version >= 1 { - err = pe.putNullableString(b.rack) + if version < 9 { + err = pe.putNullableString(b.rack) + } else { + err = pe.putNullableCompactString(b.rack) + } if err != nil { return err } } + if version >= 9 { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -1439,7 +1484,7 @@ func (b *Broker) sendAndReceiveSASLSCRAMv0() error { length := len(msg) authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte(msg)) + copy(authBytes[4:], msg) _, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(length + 4) if err != nil { @@ -1633,16 +1678,52 @@ func (b *Broker) updateProtocolMetrics(rb protocolBody) { } } -func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { - if throttleTime != time.Duration(0) { - DebugLogger.Printf( - "producer/broker/%d ProduceResponse throttled %v\n", - b.ID(), throttleTime) - if b.brokerThrottleTime != nil { - throttleTimeInMs := int64(throttleTime / time.Millisecond) - b.brokerThrottleTime.Update(throttleTimeInMs) +type throttleSupport interface { + throttleTime() time.Duration +} + +func (b *Broker) handleThrottledResponse(resp protocolBody) { + throttledResponse, ok := resp.(throttleSupport) + if !ok { + return + } + throttleTime := throttledResponse.throttleTime() + if throttleTime == time.Duration(0) { + return + } + DebugLogger.Printf( + "broker/%d %T throttled %v\n", b.ID(), resp, throttleTime) + b.setThrottle(throttleTime) + b.updateThrottleMetric(throttleTime) +} + +func (b *Broker) setThrottle(throttleTime time.Duration) { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() + if b.throttleTimer != nil { + // if there is an existing timer stop/clear it + if !b.throttleTimer.Stop() { + <-b.throttleTimer.C } } + b.throttleTimer = time.NewTimer(throttleTime) +} + +func (b *Broker) waitIfThrottled() { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() + if b.throttleTimer != nil { + DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID()) + <-b.throttleTimer.C + b.throttleTimer = nil + } +} + +func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { + if b.brokerThrottleTime != nil { + throttleTimeInMs := int64(throttleTime / time.Millisecond) + b.brokerThrottleTime.Update(throttleTimeInMs) + } } func (b *Broker) registerMetrics() { diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/IBM/sarama/client.go similarity index 86% rename from vendor/github.com/Shopify/sarama/client.go rename to vendor/github.com/IBM/sarama/client.go index f7872a1b3cbf8..2decba7c55399 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -1,13 +1,18 @@ package sarama import ( + "context" "errors" "math" "math/rand" + "net" "sort" + "strings" "sync" "sync/atomic" "time" + + "golang.org/x/net/proxy" ) // Client is a generic Kafka client. It manages connections to one or more Kafka brokers. @@ -50,7 +55,7 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. 
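The new `throttleTimer`/`throttleTimerLock` fields and the `handleThrottledResponse`/`setThrottle`/`waitIfThrottled` trio above make the client honor broker throttle hints: a throttled response arms a timer under a mutex, and the next send blocks until it fires. A self-contained sketch of that timer pattern, with stand-in types rather than sarama's actual `Broker`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type throttledSender struct {
	mu    sync.Mutex
	timer *time.Timer
}

// setThrottle arms (or re-arms) the throttle timer, draining a stopped
// timer first so its channel is received from exactly once later.
func (s *throttledSender) setThrottle(d time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.timer != nil && !s.timer.Stop() {
		<-s.timer.C
	}
	s.timer = time.NewTimer(d)
}

// waitIfThrottled blocks until any pending throttle period has elapsed.
func (s *throttledSender) waitIfThrottled() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.timer != nil {
		<-s.timer.C
		s.timer = nil
	}
}

func main() {
	s := &throttledSender{}
	s.setThrottle(100 * time.Millisecond)
	start := time.Now()
	s.waitIfThrottled() // blocks roughly 100ms, like broker.sendInternal
	fmt.Println("sent after", time.Since(start).Round(time.Millisecond))
}
```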
Leader(topic string, partitionID int32) (*Broker, error) - // LeaderAndEpoch returns the the leader and its epoch for the current + // LeaderAndEpoch returns the leader and its epoch for the current // topic/partition, as determined by querying the cluster metadata. LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) @@ -132,10 +137,10 @@ const ( ) type client struct { - // updateMetaDataMs stores the time at which metadata was lasted updated. + // updateMetadataMs stores the time at which metadata was last updated. // Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 - updateMetaDataMs int64 + updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater @@ -158,7 +163,6 @@ type client struct { cachedPartitionsResults map[string][maxPartitionIndex][]int32 lock sync.RWMutex // protects access to the maps that hold cluster state. - } // NewClient creates a new Client. It connects to one of the given broker addresses @@ -179,6 +183,13 @@ func NewClient(addrs []string, conf *Config) (Client, error) { return nil, ConfigurationError("You must provide at least one broker address") } + if strings.Contains(addrs[0], ".servicebus.windows.net") { + if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) { + Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility") + conf.Version = V1_0_0_0 + } + } + client := &client{ conf: conf, closer: make(chan none), @@ -191,6 +202,14 @@ func NewClient(addrs []string, conf *Config) (Client, error) { transactionCoordinators: make(map[string]int32), } + if conf.Net.ResolveCanonicalBootstrapServers { + var err error + addrs, err = client.resolveCanonicalNames(addrs) + if err != nil { + return nil, err + } + } + client.randomizeSeedBrokers(addrs) if conf.Metadata.Full { @@ -239,12 +258,26 @@ func (client *client) Broker(brokerID int32) (*Broker, error) { } func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + // FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superseded by transaction_manager.go? brokerErrors := make([]error, 0) - for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { - var response *InitProducerIDResponse - req := &InitProducerIDRequest{} + for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { + request := &InitProducerIDRequest{} + + if client.conf.Version.IsAtLeast(V2_7_0_0) { + // Version 4 adds the support for new error code PRODUCER_FENCED. + request.Version = 4 + } else if client.conf.Version.IsAtLeast(V2_5_0_0) { + // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error + request.Version = 3 + } else if client.conf.Version.IsAtLeast(V2_4_0_0) { + // Version 2 is the first flexible version. + request.Version = 2 + } else if client.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. 
+ request.Version = 1 + } - response, err := broker.InitProducerID(req) + response, err := broker.InitProducerID(request) if err == nil { return response, nil } else { @@ -486,16 +519,16 @@ func (client *client) RefreshBrokers(addrs []string) error { defer client.lock.Unlock() for _, broker := range client.brokers { - _ = broker.Close() - delete(client.brokers, broker.ID()) + safeAsyncClose(broker) } + client.brokers = make(map[int32]*Broker) for _, broker := range client.seedBrokers { - _ = broker.Close() + safeAsyncClose(broker) } for _, broker := range client.deadSeeds { - _ = broker.Close() + safeAsyncClose(broker) } client.seedBrokers = nil @@ -513,7 +546,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it - // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return @@ -527,17 +560,17 @@ func (client *client) RefreshMetadata(topics ...string) error { return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) } -func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { +func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) { if client.Closed() { return -1, ErrClosedClient } - offset, err := client.getOffset(topic, partitionID, time) + offset, err := client.getOffset(topic, partitionID, timestamp) if err != nil { if err := client.RefreshMetadata(topic); err != nil { return -1, err } - return client.getOffset(topic, partitionID, time) + return client.getOffset(topic, partitionID, timestamp) } return offset, err @@ -730,22 +763,21 @@ func (client *client) registerBroker(broker *Broker) { } } -// deregisterBroker removes a broker from the seedsBroker list, and if it's -// not the seedbroker, removes it from brokers map completely. +// deregisterBroker removes a broker from the broker list, and if it's +// not in the broker list, removes it from seedBrokers. 
func (client *client) deregisterBroker(broker *Broker) { client.lock.Lock() defer client.lock.Unlock() + _, ok := client.brokers[broker.ID()] + if ok { + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + return + } if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { client.deadSeeds = append(client.deadSeeds, broker) client.seedBrokers = client.seedBrokers[1:] - } else { - // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, - // but we really shouldn't have to; once that loop is made better this case can be - // removed, and the function generally can be renamed from `deregisterBroker` to - // `nextSeedBroker` or something - DebugLogger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) - delete(client.brokers, broker.ID()) } } @@ -758,33 +790,12 @@ func (client *client) resurrectDeadBrokers() { client.deadSeeds = nil } -func (client *client) anyBroker() *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - - // not guaranteed to be random *or* deterministic - for _, broker := range client.brokers { - _ = broker.Open(client.conf) - return broker - } - - return nil -} - +// LeastLoadedBroker returns the broker with the least pending requests. +// Firstly, choose the broker from cached broker list. If the broker list is empty, choose from seed brokers. func (client *client) LeastLoadedBroker() *Broker { client.lock.RLock() defer client.lock.RUnlock() - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - var leastLoadedBroker *Broker pendingRequests := math.MaxInt for _, broker := range client.brokers { @@ -793,10 +804,16 @@ func (client *client) LeastLoadedBroker() *Broker { leastLoadedBroker = broker } } - if leastLoadedBroker != nil { _ = leastLoadedBroker.Open(client.conf) + return leastLoadedBroker + } + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] } + return leastLoadedBroker } @@ -879,17 +896,29 @@ func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, in return nil, -1, ErrUnknownTopicOrPartition } -func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { +func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) { broker, err := client.Leader(topic, partitionID) if err != nil { return -1, err } request := &OffsetRequest{} - if client.conf.Version.IsAtLeast(V0_10_1_0) { + if client.conf.Version.IsAtLeast(V2_1_0_0) { + // Version 4 adds the current leader epoch, which is used for fencing. + request.Version = 4 + } else if client.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 3 is the same as version 2. + request.Version = 3 + } else if client.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 2 adds the isolation level, which is used for transactional reads. + request.Version = 2 + } else if client.conf.Version.IsAtLeast(V0_10_1_0) { + // Version 1 removes MaxNumOffsets. From this version forward, only a single + // offset can be returned. 
request.Version = 1 } - request.AddBlock(topic, partitionID, time, 1) + + request.AddBlock(topic, partitionID, timestamp, 1) response, err := broker.GetAvailableOffsets(request) if err != nil { @@ -975,20 +1004,21 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, time.Sleep(backoff) } - t := atomic.LoadInt64(&client.updateMetaDataMs) - if time.Since(time.Unix(t/1e3, 0)) < backoff { + t := atomic.LoadInt64(&client.updateMetadataMs) + if time.Since(time.UnixMilli(t)) < backoff { return err } + attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) - return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } - broker := client.anyBroker() + broker := client.LeastLoadedBroker() brokerErrors := make([]error, 0) - for ; broker != nil && !pastDeadline(0); broker = client.anyBroker() { + for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() { allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) @@ -999,15 +1029,19 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, req := NewMetadataRequest(client.conf.Version, topics) req.AllowAutoTopicCreation = allowAutoTopicCreation - t := atomic.LoadInt64(&client.updateMetaDataMs) - if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { - return nil - } + atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError var packetEncodingError PacketEncodingError if err == nil { + // When talking to the startup phase of a broker, it is possible to receive an empty metadata set. We should remove that broker and try next broker (https://issues.apache.org/jira/browse/KAFKA-7924). + if len(response.Brokers) == 0 { + Logger.Printf("client/metadata receiving empty brokers from the metadata response when requesting the broker #%d at %s", broker.ID(), broker.addr) + _ = broker.Close() + client.deregisterBroker(broker) + continue + } allKnownMetaData := len(topics) == 0 // valid response, use it shouldRetry, err := client.updateMetadata(response, allKnownMetaData) @@ -1160,24 +1194,30 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) + attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } brokerErrors := make([]error, 0) - for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { + for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr()) request := new(FindCoordinatorRequest) request.CoordinatorKey = coordinatorKey request.CoordinatorType = coordinatorType + // Version 1 adds KeyType. 
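Two retry changes show up in `tryRefreshMetadata` above: the CAS-and-bail logic is replaced with a plain `StoreInt64`, and a retry is skipped entirely when some other goroutine already refreshed metadata within the backoff window. A runnable sketch of that window check; the variable names here are illustrative, not sarama's:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// lastMetadataRefreshMs plays the role of client.updateMetadataMs: a
// millisecond timestamp stored atomically.
var lastMetadataRefreshMs int64

// refreshedWithinBackoff reports whether a refresh happened more recently
// than the backoff window, mirroring the early return in tryRefreshMetadata.
func refreshedWithinBackoff(backoff time.Duration) bool {
	t := atomic.LoadInt64(&lastMetadataRefreshMs)
	return time.Since(time.UnixMilli(t)) < backoff
}

func main() {
	atomic.StoreInt64(&lastMetadataRefreshMs, time.Now().UnixMilli())
	fmt.Println(refreshedWithinBackoff(time.Second)) // true: just refreshed
	time.Sleep(1100 * time.Millisecond)
	fmt.Println(refreshedWithinBackoff(time.Second)) // false: window elapsed
}
```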
if client.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } + // Version 2 is the same as version 1. + if client.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } response, err := broker.FindCoordinator(request) if err != nil { @@ -1228,6 +1268,53 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo return retry(Wrap(ErrOutOfBrokers, brokerErrors...)) } +func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) { + ctx := context.Background() + + dialer := client.Config().getDialer() + resolver := net.Resolver{ + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + // dial func should only be called once, so switching within is acceptable + switch d := dialer.(type) { + case proxy.ContextDialer: + return d.DialContext(ctx, network, address) + default: + // we have no choice but to ignore the context + return d.Dial(network, address) + } + }, + } + + canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go + for _, addr := range addrs { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return nil, err // message includes addr + } + + ips, err := resolver.LookupHost(ctx, host) + if err != nil { + return nil, err // message includes host + } + for _, ip := range ips { + ptrs, err := resolver.LookupAddr(ctx, ip) + if err != nil { + return nil, err // message includes ip + } + + // unlike the Java client, we do not further check that PTRs resolve + ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI + canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{} + } + } + + addrs = make([]string, 0, len(canonicalAddrs)) + for addr := range canonicalAddrs { + addrs = append(addrs, addr) + } + return addrs, nil +} + // nopCloserClient embeds an existing Client, but disables // the Close method (yet all other methods pass // through unchanged). This is for use in larger structs diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go similarity index 99% rename from vendor/github.com/Shopify/sarama/compress.go rename to vendor/github.com/IBM/sarama/compress.go index 504007a49b144..a7bd525bc73ce 100644 --- a/vendor/github.com/Shopify/sarama/compress.go +++ b/vendor/github.com/IBM/sarama/compress.go @@ -2,11 +2,11 @@ package sarama import ( "bytes" - "compress/gzip" "fmt" "sync" snappy "github.com/eapache/go-xerial-snappy" + "github.com/klauspost/compress/gzip" "github.com/pierrec/lz4/v4" ) diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/IBM/sarama/config.go similarity index 95% rename from vendor/github.com/Shopify/sarama/config.go rename to vendor/github.com/IBM/sarama/config.go index b07034434cb8e..f2f197887c97d 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -1,7 +1,6 @@ package sarama import ( - "compress/gzip" "crypto/tls" "fmt" "io" @@ -9,13 +8,16 @@ import ( "regexp" "time" + "github.com/klauspost/compress/gzip" "github.com/rcrowley/go-metrics" "golang.org/x/net/proxy" ) const defaultClientID = "sarama" -var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) +// validClientID specifies the permitted characters for a client.id when +// connecting to Kafka versions before 1.0.0 (KIP-190) +var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. 
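`resolveCanonicalNames` above mirrors the JVM client's canonical bootstrap lookup: forward-resolve each bootstrap host, reverse-resolve every resulting IP, and keep the PTR names (minus the trailing dot, which would break GSSAPI). A standalone approximation using `net.DefaultResolver`; the vendored code instead threads the configured proxy dialer through a custom `net.Resolver`:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"strings"
)

// canonicalize resolves one bootstrap address to its canonical host:port
// forms, deduplicating as it goes, like resolveCanonicalNames does.
func canonicalize(ctx context.Context, addr string) ([]string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	ips, err := net.DefaultResolver.LookupHost(ctx, host)
	if err != nil {
		return nil, err
	}
	seen := make(map[string]struct{})
	for _, ip := range ips {
		ptrs, err := net.DefaultResolver.LookupAddr(ctx, ip)
		if err != nil {
			return nil, err
		}
		if len(ptrs) == 0 {
			continue
		}
		// a trailing dot in the PTR name breaks GSSAPI, so strip it
		seen[net.JoinHostPort(strings.TrimSuffix(ptrs[0], "."), port)] = struct{}{}
	}
	out := make([]string, 0, len(seen))
	for a := range seen {
		out = append(out, a)
	}
	return out, nil
}

func main() {
	addrs, err := canonicalize(context.Background(), "localhost:9092")
	fmt.Println(addrs, err)
}
```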
type Config struct { @@ -50,6 +52,15 @@ type Config struct { ReadTimeout time.Duration // How long to wait for a response. WriteTimeout time.Duration // How long to wait for a transmit. + // ResolveCanonicalBootstrapServers turns each bootstrap broker address + // into a set of IPs, then does a reverse lookup on each one to get its + // canonical hostname. This list of hostnames then replaces the + // original address list. Similar to the `client.dns.lookup` option in + // the JVM client, this is especially useful with GSSAPI, where it + // allows providing an alias record instead of individual broker + // hostnames. Defaults to false. + ResolveCanonicalBootstrapServers bool + TLS struct { // Whether or not to use TLS when connecting to the broker // (defaults to false). @@ -272,7 +283,6 @@ type Config struct { // Consumer is the namespace for configuration related to consuming messages, // used by the Consumer. Consumer struct { - // Group is the namespace for configuring consumer group. Group struct { Session struct { @@ -294,7 +304,7 @@ type Config struct { Interval time.Duration } Rebalance struct { - // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy @@ -302,7 +312,7 @@ type Config struct { // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. - // default: [BalanceStrategyRange] + // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. @@ -377,7 +387,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. 
MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to @@ -505,7 +515,7 @@ func NewConfig() *Config { c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second c.Net.SASL.Handshake = true - c.Net.SASL.Version = SASLHandshakeV0 + c.Net.SASL.Version = SASLHandshakeV1 c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond @@ -513,7 +523,7 @@ func NewConfig() *Config { c.Metadata.Full = true c.Metadata.AllowAutoTopicCreation = true - c.Producer.MaxMessageBytes = 1000000 + c.Producer.MaxMessageBytes = 1024 * 1024 c.Producer.RequiredAcks = WaitForLocal c.Producer.Timeout = 10 * time.Second c.Producer.Partitioner = NewHashPartitioner @@ -539,7 +549,7 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second @@ -650,19 +660,26 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } - if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + switch c.Net.SASL.GSSAPI.AuthType { + case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } - } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + - " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + } + case KRB5_CCACHE_AUTH: + if c.Net.SASL.GSSAPI.CCachePath == "" { + return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") } - } else { - return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + default: + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. 
Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } @@ -831,8 +848,11 @@ func (c *Config) Validate() error { switch { case c.ChannelBufferSize < 0: return ConfigurationError("ChannelBufferSize must be >= 0") - case !validID.MatchString(c.ClientID): - return ConfigurationError("ClientID is invalid") + } + + // only validate clientID locally for Kafka versions before KIP-190 was implemented + if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) { + return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", c.ClientID)) } return nil @@ -840,7 +860,7 @@ func (c *Config) Validate() error { func (c *Config) getDialer() proxy.Dialer { if c.Net.Proxy.Enable { - Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + Logger.Println("using proxy") return c.Net.Proxy.Dialer } else { return &net.Dialer{ diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go similarity index 100% rename from vendor/github.com/Shopify/sarama/config_resource_type.go rename to vendor/github.com/IBM/sarama/config_resource_type.go diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go similarity index 96% rename from vendor/github.com/Shopify/sarama/consumer.go rename to vendor/github.com/IBM/sarama/consumer.go index eb27df8d732de..60556a566d07c 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/IBM/sarama/consumer.go @@ -85,13 +85,13 @@ type Consumer interface { // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) - // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() - // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. 
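The `Validate` change above stops rejecting client IDs locally once the target broker is at least 1.0.0, because KIP-190 made brokers sanitize client IDs themselves. A reduced sketch of the check; `brokerAtLeast1_0_0` stands in for `c.Version.IsAtLeast(V1_0_0_0)`:

```go
package main

import (
	"fmt"
	"regexp"
)

// validClientID is the same character whitelist the vendored config.go
// keeps for brokers older than 1.0.0.
var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)

// validateClientID models the relaxed validation: the whitelist only
// applies to pre-KIP-190 brokers.
func validateClientID(id string, brokerAtLeast1_0_0 bool) error {
	if !brokerAtLeast1_0_0 && !validClientID.MatchString(id) {
		return fmt.Errorf("ClientID value %q is not valid for Kafka versions before 1.0.0", id)
	}
	return nil
}

func main() {
	fmt.Println(validateClientID("loki promtail", false)) // error: space rejected pre-1.0.0
	fmt.Println(validateClientID("loki promtail", true))  // <nil>: the broker sanitizes it
}
```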
ResumeAll() } @@ -920,7 +920,7 @@ func (bc *brokerConsumer) subscriptionManager() { } // subscriptionConsumer ensures we will get nil right away if no new subscriptions is available -// this is a the main loop that fetches Kafka messages +// this is the main loop that fetches Kafka messages func (bc *brokerConsumer) subscriptionConsumer() { for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) @@ -942,6 +942,7 @@ func (bc *brokerConsumer) subscriptionConsumer() { // if there isn't response, it means that not fetch was made // so we don't need to handle any response if response == nil { + time.Sleep(partitionConsumersBatchTimeout) continue } @@ -1067,20 +1068,35 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + // Version 1 is the same as version 0. if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { request.Version = 1 } + // Starting in Version 2, the requestor must be able to handle Kafka Log + // Message format version 1. if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } + // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in + // the request is now relevant. Partitions will be processed in the order + // they appear in the request. if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { request.Version = 3 request.MaxBytes = MaxResponseSize } + // Version 4 adds IsolationLevel. Starting in version 4, the requestor must be + // able to handle Kafka log message format version 2. + // Version 5 adds LogStartOffset to indicate the earliest available offset of + // partition data that can be consumed. if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 4 + request.Version = 5 request.Isolation = bc.consumer.conf.Consumer.IsolationLevel } + // Version 6 is the same as version 5. + if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 6 + } + // Version 7 adds incremental fetch request support. if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { request.Version = 7 // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 @@ -1089,9 +1105,17 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request.SessionID = 0 request.SessionEpoch = -1 } + // Version 8 is the same as version 7. + if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 8 + } + // Version 9 adds CurrentLeaderEpoch, as described in KIP-320. + // Version 10 indicates that we can use the ZStd compression algorithm, as + // described in KIP-110. if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { request.Version = 10 } + // Version 11 adds RackID for KIP-392 fetch from closest replica if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { request.Version = 11 request.RackID = bc.consumer.conf.RackID diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go similarity index 86% rename from vendor/github.com/Shopify/sarama/consumer_group.go rename to vendor/github.com/IBM/sarama/consumer_group.go index ecdbcfa687e5b..53b64dd3b88c2 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/IBM/sarama/consumer_group.go @@ -114,6 +114,9 @@ func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerG // necessary to call Close() on the underlying client when shutting down this consumer. 
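Unlike the else-if ladder in `InitProducerID`, the `fetchNewMessages` version selection above is written as cumulative overwrites: every satisfied clause bumps the version, so the last one wins. A compressed model of that shape, again with stand-in version constants rather than sarama's `KafkaVersion`:

```go
package main

import "fmt"

// kafkaVersion is a stand-in for sarama.KafkaVersion; ordering of the
// constants models IsAtLeast comparisons.
type kafkaVersion int

const (
	v0_9_0_0 kafkaVersion = iota
	v0_10_0_0
	v0_10_1_0
	v0_11_0_0
	v1_0_0_0
	v1_1_0_0
	v2_0_0_0
	v2_1_0_0
	v2_3_0_0
)

// fetchRequestVersion models the cumulative ladder in fetchNewMessages:
// each satisfied clause overwrites the previous version.
func fetchRequestVersion(v kafkaVersion) int16 {
	var version int16
	if v >= v0_9_0_0 {
		version = 1
	}
	if v >= v0_10_0_0 {
		version = 2
	}
	if v >= v0_10_1_0 {
		version = 3 // adds MaxBytes
	}
	if v >= v0_11_0_0 {
		version = 5 // adds IsolationLevel and LogStartOffset
	}
	if v >= v1_0_0_0 {
		version = 6
	}
	if v >= v1_1_0_0 {
		version = 7 // adds fetch sessions (not used by sarama)
	}
	if v >= v2_0_0_0 {
		version = 8
	}
	if v >= v2_1_0_0 {
		version = 10 // KIP-320 leader epoch, KIP-110 zstd
	}
	if v >= v2_3_0_0 {
		version = 11 // KIP-392 RackID
	}
	return version
}

func main() {
	fmt.Println(fetchRequestVersion(v1_1_0_0)) // 7
}
```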
// PLEASE NOTE: consumer groups can only re-use but not share clients. func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + if client == nil { + return nil, ConfigurationError("client must not be nil") + } // For clients passed in by the client, ensure we don't // call Close() on it. cli := &nopCloserClient{client} @@ -141,8 +144,8 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { userData: config.Consumer.Group.Member.UserData, metricRegistry: newCleanupRegistry(config.MetricRegistry), } - if client.Config().Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { - cg.groupInstanceId = &client.Config().Consumer.Group.InstanceId + if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { + cg.groupInstanceId = &config.Consumer.Group.InstanceId } return cg, nil } @@ -210,13 +213,11 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return err } - // loop check topic partition numbers changed - // will trigger rebalance when any topic partitions number had changed - // avoid Consume function called again that will generate more than loopCheckPartitionNumbers coroutine - go c.loopCheckPartitionNumbers(topics, sess) - - // Wait for session exit signal - <-sess.ctx.Done() + // Wait for session exit signal or Close() call + select { + case <-c.closed: + case <-sess.ctx.Done(): + } // Gracefully release session claims return sess.release(true) @@ -244,6 +245,8 @@ func (c *consumerGroup) ResumeAll() { func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { select { + case <-ctx.Done(): + return nil, ctx.Err() case <-c.closed: return nil, ErrClosedConsumerGroup case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): @@ -252,7 +255,10 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha if refreshCoordinator { err := c.client.RefreshCoordinator(c.groupID) if err != nil { - return c.retryNewSession(ctx, topics, handler, retries, true) + if retries <= 0 { + return nil, err + } + return c.retryNewSession(ctx, topics, handler, retries-1, true) } } @@ -260,6 +266,9 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha } func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } coordinator, err := c.client.Coordinator(c.groupID) if err != nil { if retries <= 0 { @@ -315,10 +324,12 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } return c.retryNewSession(ctx, topics, handler, retries, true) case ErrMemberIdRequired: - // from JoinGroupRequest v4, if client start with empty member id, - // it need to get member id from response and send another join request to join group + // from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts + // with an empty member id, it needs to get the assigned id from the + // response and send another join request with that id to actually join the + // group c.memberID = join.MemberId - return c.retryNewSession(ctx, topics, handler, retries+1 /*keep retry time*/, false) + return c.newSession(ctx, topics, handler, retries) case ErrFencedInstancedId: if c.groupInstanceId != nil { Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", 
*c.groupInstanceId) @@ -342,13 +353,15 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler // Prepare distribution plan if we joined as the leader var plan BalanceStrategyPlan var members map[string]ConsumerGroupMemberMetadata + var allSubscribedTopicPartitions map[string][]int32 + var allSubscribedTopics []string if join.LeaderId == join.MemberId { members, err = join.GetMembers() if err != nil { return nil, err } - plan, err = c.balance(strategy, members) + allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members) if err != nil { return nil, err } @@ -403,7 +416,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler claims = members.Topics // in the case of stateful balance strategies, hold on to the returned - // assignment metadata, otherwise, reset the statically defined conusmer + // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData @@ -416,7 +429,17 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } } - return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) + session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) + if err != nil { + return nil, err + } + + // only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance + if join.LeaderId == join.MemberId { + go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session) + } + + return session, err } func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { @@ -430,7 +453,20 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.Version = 1 req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } - if c.groupInstanceId != nil { + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 2 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 3 + } + // from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually + // send two JoinGroupRequests, once with the empty member id, and then again + // with the assigned id from the first response. This is handled via the + // ErrMemberIdRequired case. + if c.config.Version.IsAtLeast(V2_2_0_0) { + req.Version = 4 + } + if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 5 req.GroupInstanceId = c.groupInstanceId } @@ -479,12 +518,19 @@ func (c *consumerGroup) syncGroupRequest( GenerationId: generationID, } + // Versions 1 and 2 are the same as version 0. + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. 
if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 - } - if c.groupInstanceId != nil { req.GroupInstanceId = c.groupInstanceId } + for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) @@ -513,7 +559,16 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g MemberId: memberID, GenerationId: generationID, } - if c.groupInstanceId != nil { + + // Version 1 and version 2 are the same as version 0. + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. + if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 req.GroupInstanceId = c.groupInstanceId } @@ -521,23 +576,36 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g return coordinator.Heartbeat(req) } -func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { - topics := make(map[string][]int32) +func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) { + topicPartitions := make(map[string][]int32) for _, meta := range members { for _, topic := range meta.Topics { - topics[topic] = nil + topicPartitions[topic] = nil } } - for topic := range topics { + allSubscribedTopics := make([]string, 0, len(topicPartitions)) + for topic := range topicPartitions { + allSubscribedTopics = append(allSubscribedTopics, topic) + } + + // refresh metadata for all the subscribed topics in the consumer group + // to avoid using stale metadata when assigning partitions + err := c.client.RefreshMetadata(allSubscribedTopics...) + if err != nil { + return nil, nil, nil, err + } + + for topic := range topicPartitions { partitions, err := c.client.Partitions(topic) if err != nil { - return nil, err + return nil, nil, nil, err } - topics[topic] = partitions + topicPartitions[topic] = partitions } - return strategy.Plan(members, topics) + plan, err := strategy.Plan(members, topicPartitions) + return topicPartitions, allSubscribedTopics, plan, err } // Leaves the cluster, called by Close. @@ -553,32 +621,43 @@ func (c *consumerGroup) leave() error { return err } - // KIP-345 if groupInstanceId is set, don not leave group when consumer closed. - // Since we do not discover ApiVersion for brokers, LeaveGroupRequest still use the old version request for now - if c.groupInstanceId == nil { - resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ - GroupId: c.groupID, + // as per KIP-345 if groupInstanceId is set, i.e. 
static membership is in action, then do not leave group when consumer closed, just clear memberID + if c.groupInstanceId != nil { + c.memberID = "" + return nil + } + req := &LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + } + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + if c.config.Version.IsAtLeast(V2_4_0_0) { + req.Version = 3 + req.Members = append(req.Members, MemberIdentity{ MemberId: c.memberID, }) - if err != nil { - _ = coordinator.Close() - return err - } + } - // Unset memberID - c.memberID = "" + resp, err := coordinator.LeaveGroup(req) + if err != nil { + _ = coordinator.Close() + return err + } - // Check response - switch resp.Err { - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: - return nil - default: - return resp.Err - } - } else { - c.memberID = "" + // clear the memberID + c.memberID = "" + + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err } - return nil } func (c *consumerGroup) handleError(err error, topic string, partition int32) { @@ -612,24 +691,29 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } } -func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { +func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) { if c.config.Metadata.RefreshFrequency == time.Duration(0) { return } - pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer session.cancel() - defer pause.Stop() - var oldTopicToPartitionNum map[string]int - var err error - if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil { - return + + oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions)) + for topic, partitions := range allSubscribedTopicPartitions { + oldTopicToPartitionNum[topic] = len(partitions) } + + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer pause.Stop() for { if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { return } else { for topic, num := range oldTopicToPartitionNum { if newTopicToPartitionNum[topic] != num { + Logger.Printf( + "consumergroup/%s loop check partition number goroutine find partitions in topics %s changed from %d to %d\n", + c.groupID, topics, num, newTopicToPartitionNum[topic]) return // trigger the end of the session on exit } } @@ -638,7 +722,7 @@ func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *cons case <-pause.C: case <-session.ctx.Done(): Logger.Printf( - "consumergroup/%s loop check partition number coroutine will exit, topics %s\n", + "consumergroup/%s loop check partition number goroutine will exit, topics %s\n", c.groupID, topics) // if session closed by other, should be exited return @@ -1013,7 +1097,7 @@ type ConsumerGroupClaim interface { // InitialOffset returns the initial offset that was used as a starting point for this claim. InitialOffset() int64 - // HighWaterMarkOffset returns the high water mark offset of the partition, + // HighWaterMarkOffset returns the high watermark offset of the partition, // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. 
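`loopCheckPartitionNumbers` above now seeds its baseline from the partition counts captured at assignment time and is started only on the group leader. An illustrative reduction of that watcher, with a fake `count` function standing in for `client.Partitions`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// watchPartitionCounts takes the partition counts captured at assignment
// time as a baseline, polls on a ticker, and cancels the session as soon
// as any subscribed topic's count changes, which forces a rebalance.
func watchPartitionCounts(ctx context.Context, cancel context.CancelFunc,
	baseline map[string]int, count func(topic string) (int, error), every time.Duration) {
	defer cancel() // returning ends the session
	tick := time.NewTicker(every)
	defer tick.Stop()
	for {
		for topic, old := range baseline {
			n, err := count(topic)
			if err != nil {
				return
			}
			if n != old {
				fmt.Printf("partitions of %s changed from %d to %d\n", topic, old, n)
				return
			}
		}
		select {
		case <-tick.C:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	polls := 0
	count := func(string) (int, error) {
		polls++
		if polls > 2 {
			return 4, nil // simulate the topic growing from 3 to 4 partitions
		}
		return 3, nil
	}
	go watchPartitionCounts(ctx, cancel, map[string]int{"logs": 3}, count, 10*time.Millisecond)
	<-ctx.Done() // the watcher cancelled the session context
}
```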
HighWaterMarkOffset() int64 diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go similarity index 73% rename from vendor/github.com/Shopify/sarama/consumer_group_members.go rename to vendor/github.com/IBM/sarama/consumer_group_members.go index 3b8ca36f60eae..2d38960919940 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/IBM/sarama/consumer_group_members.go @@ -9,6 +9,8 @@ type ConsumerGroupMemberMetadata struct { Topics []string UserData []byte OwnedPartitions []*OwnedPartition + GenerationID int32 + RackID *string } func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { @@ -22,6 +24,27 @@ func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { return err } + if m.Version >= 1 { + if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil { + return err + } + for _, op := range m.OwnedPartitions { + if err := op.encode(pe); err != nil { + return err + } + } + } + + if m.Version >= 2 { + pe.putInt32(m.GenerationID) + } + + if m.Version >= 3 { + if err := pe.putNullableString(m.RackID); err != nil { + return err + } + } + return nil } @@ -48,18 +71,29 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { } return err } - if n == 0 { - return nil - } - m.OwnedPartitions = make([]*OwnedPartition, n) - for i := 0; i < n; i++ { - m.OwnedPartitions[i] = &OwnedPartition{} - if err := m.OwnedPartitions[i].decode(pd); err != nil { - return err + if n > 0 { + m.OwnedPartitions = make([]*OwnedPartition, n) + for i := 0; i < n; i++ { + m.OwnedPartitions[i] = &OwnedPartition{} + if err := m.OwnedPartitions[i].decode(pd); err != nil { + return err + } } } } + if m.Version >= 2 { + if m.GenerationID, err = pd.getInt32(); err != nil { + return err + } + } + + if m.Version >= 3 { + if m.RackID, err = pd.getNullableString(); err != nil { + return err + } + } + return nil } @@ -68,6 +102,16 @@ type OwnedPartition struct { Partitions []int32 } +func (m *OwnedPartition) encode(pe packetEncoder) error { + if err := pe.putString(m.Topic); err != nil { + return err + } + if err := pe.putInt32Array(m.Partitions); err != nil { + return err + } + return nil +} + func (m *OwnedPartition) decode(pd packetDecoder) (err error) { if m.Topic, err = pd.getString(); err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go similarity index 75% rename from vendor/github.com/Shopify/sarama/consumer_metadata_request.go rename to vendor/github.com/IBM/sarama/consumer_metadata_request.go index 5c18e048a7206..ef6b9e7217347 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/IBM/sarama/consumer_metadata_request.go @@ -2,6 +2,7 @@ package sarama // ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { + Version int16 ConsumerGroup string } @@ -9,6 +10,7 @@ func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { tmp := new(FindCoordinatorRequest) tmp.CoordinatorKey = r.ConsumerGroup tmp.CoordinatorType = CoordinatorGroup + tmp.Version = r.Version return tmp.encode(pe) } @@ -26,13 +28,24 @@ func (r *ConsumerMetadataRequest) key() int16 { } func (r *ConsumerMetadataRequest) version() int16 { - return 0 + return r.Version } func (r *ConsumerMetadataRequest) headerVersion() int16 { return 1 } +func (r *ConsumerMetadataRequest) isValidVersion() bool { + return 
r.Version >= 0 && r.Version <= 2 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { - return V0_8_2_0 + switch r.Version { + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/consumer_metadata_response.go rename to vendor/github.com/IBM/sarama/consumer_metadata_response.go index 7fe0cf9716de0..d99209e3b63d4 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/IBM/sarama/consumer_metadata_response.go @@ -7,6 +7,7 @@ import ( // ConsumerMetadataResponse holds the response for a consumer group meta data requests type ConsumerMetadataResponse struct { + Version int16 Err KError Coordinator *Broker CoordinatorID int32 // deprecated: use Coordinator.ID() @@ -53,7 +54,7 @@ func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { } tmp := &FindCoordinatorResponse{ - Version: 0, + Version: r.Version, Err: r.Err, Coordinator: r.Coordinator, } @@ -70,13 +71,24 @@ func (r *ConsumerMetadataResponse) key() int16 { } func (r *ConsumerMetadataResponse) version() int16 { - return 0 + return r.Version } func (r *ConsumerMetadataResponse) headerVersion() int16 { return 0 } +func (r *ConsumerMetadataResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { - return V0_8_2_0 + switch r.Version { + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } } diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/control_record.go rename to vendor/github.com/IBM/sarama/control_record.go diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/crc32_field.go rename to vendor/github.com/IBM/sarama/crc32_field.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/create_partitions_request.go rename to vendor/github.com/IBM/sarama/create_partitions_request.go index 46fb0440249c5..3f5512656bbb5 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_request.go +++ b/vendor/github.com/IBM/sarama/create_partitions_request.go @@ -3,6 +3,7 @@ package sarama import "time" type CreatePartitionsRequest struct { + Version int16 TopicPartitions map[string]*TopicPartition Timeout time.Duration ValidateOnly bool @@ -64,15 +65,26 @@ func (r *CreatePartitionsRequest) key() int16 { } func (r *CreatePartitionsRequest) version() int16 { - return 0 + return r.Version } func (r *CreatePartitionsRequest) headerVersion() int16 { return 1 } +func (r *CreatePartitionsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_0_0_0 + default: + return V2_0_0_0 + } } type TopicPartition struct { diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go similarity 
index 86% rename from vendor/github.com/Shopify/sarama/create_partitions_response.go rename to vendor/github.com/IBM/sarama/create_partitions_response.go index 235787f133ec7..c9e7ea72cd5a3 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/IBM/sarama/create_partitions_response.go @@ -6,6 +6,7 @@ import ( ) type CreatePartitionsResponse struct { + Version int16 ThrottleTime time.Duration TopicPartitionErrors map[string]*TopicPartitionError } @@ -60,15 +61,30 @@ func (r *CreatePartitionsResponse) key() int16 { } func (r *CreatePartitionsResponse) version() int16 { - return 0 + return r.Version } func (r *CreatePartitionsResponse) headerVersion() int16 { return 0 } +func (r *CreatePartitionsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_0_0_0 + default: + return V2_0_0_0 + } +} + +func (r *CreatePartitionsResponse) throttleTime() time.Duration { + return r.ThrottleTime } type TopicPartitionError struct { diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go similarity index 74% rename from vendor/github.com/Shopify/sarama/create_topics_request.go rename to vendor/github.com/IBM/sarama/create_topics_request.go index 287acd069b6ff..8382d17c20a74 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_request.go +++ b/vendor/github.com/IBM/sarama/create_topics_request.go @@ -5,10 +5,14 @@ import ( ) type CreateTopicsRequest struct { + // Version defines the protocol version to use for encode and decode Version int16 - + // TopicDetails contains the topics to create. TopicDetails map[string]*TopicDetail - Timeout time.Duration + // Timeout contains how long to wait before timing out the request. + Timeout time.Duration + // ValidateOnly if true, check that the topics can be created as specified, + // but don't create anything. ValidateOnly bool } @@ -83,22 +87,39 @@ func (r *CreateTopicsRequest) headerVersion() int16 { return 1 } +func (c *CreateTopicsRequest) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 3 +} + func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { + case 3: + return V2_0_0_0 case 2: - return V1_0_0_0 - case 1: return V0_11_0_0 - default: + case 1: + return V0_10_2_0 + case 0: return V0_10_1_0 + default: + return V2_8_0_0 } } type TopicDetail struct { - NumPartitions int32 + // NumPartitions contains the number of partitions to create in the topic, or + // -1 if we are either specifying a manual partition assignment or using the + // default partitions. + NumPartitions int32 + // ReplicationFactor contains the number of replicas to create for each + // partition in the topic, or -1 if we are either specifying a manual + // partition assignment or using the default replication factor. ReplicationFactor int16 + // ReplicaAssignment contains the manual partition assignment, or the empty + // array if we are using automatic assignment. ReplicaAssignment map[int32][]int32 - ConfigEntries map[string]*string + // ConfigEntries contains the custom topic configurations to set. 
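Nearly every request and response type touched by this diff gains the same method pair: `isValidVersion` bounds the version range the client can encode, and `requiredVersion` maps a chosen version back to the oldest broker release that understands it. A simplified stand-in showing the pairing for the CreatePartitions case above (not sarama's real `KafkaVersion` type):

```go
package main

import "fmt"

// brokerRelease is a stand-in for sarama.KafkaVersion.
type brokerRelease int

const (
	v1_0_0_0 brokerRelease = iota
	v2_0_0_0
)

// createPartitionsRequest models the request types that now carry a
// mutable Version field.
type createPartitionsRequest struct{ version int16 }

// isValidVersion bounds the versions the client knows how to encode.
func (r *createPartitionsRequest) isValidVersion() bool {
	return r.version >= 0 && r.version <= 1
}

// requiredVersion maps the chosen version to the minimum broker release,
// mirroring the switch in create_partitions_request.go.
func (r *createPartitionsRequest) requiredVersion() brokerRelease {
	switch r.version {
	case 1:
		return v2_0_0_0
	case 0:
		return v1_0_0_0
	default:
		return v2_0_0_0
	}
}

func main() {
	r := &createPartitionsRequest{version: 1}
	fmt.Println(r.isValidVersion(), r.requiredVersion() == v2_0_0_0) // true true
}
```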
+ ConfigEntries map[string]*string } func (t *TopicDetail) encode(pe packetEncoder) error { diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go similarity index 78% rename from vendor/github.com/Shopify/sarama/create_topics_response.go rename to vendor/github.com/IBM/sarama/create_topics_response.go index 6b940bff065ab..85bd4c0b93d86 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/IBM/sarama/create_topics_response.go @@ -6,9 +6,13 @@ import ( ) type CreateTopicsResponse struct { - Version int16 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration for which the request was throttled due + // to a quota violation, or zero if the request did not violate any quota. ThrottleTime time.Duration - TopicErrors map[string]*TopicError + // TopicErrors contains a map of any errors for the topics we tried to create. + TopicErrors map[string]*TopicError } func (c *CreateTopicsResponse) encode(pe packetEncoder) error { @@ -74,17 +78,29 @@ func (c *CreateTopicsResponse) headerVersion() int16 { return 0 } +func (c *CreateTopicsResponse) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 3 +} + func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { + case 3: + return V2_0_0_0 case 2: - return V1_0_0_0 - case 1: return V0_11_0_0 - default: + case 1: + return V0_10_2_0 + case 0: return V0_10_1_0 + default: + return V2_8_0_0 } } +func (r *CreateTopicsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + type TopicError struct { Err KError ErrMsg *string diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go new file mode 100644 index 0000000000000..0a099832944ee --- /dev/null +++ b/vendor/github.com/IBM/sarama/decompress.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "bytes" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/klauspost/compress/gzip" + "github.com/pierrec/lz4/v4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool + + bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + bytesPool = sync.Pool{ + New: func() interface{} { + res := make([]byte, 0, 4096) + return &res + }, + } +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + buffer := bufferPool.Get().(*bytes.Buffer) + _, err = buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse gzipReader and buffer + gzipReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + buffer := bufferPool.Get().(*bytes.Buffer) + _, err := buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct 
length + // reuse lz4Reader and buffer + lz4ReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionZSTD: + buffer := *bytesPool.Get().(*[]byte) + var err error + buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) + // copy the buffer to a new slice with the correct length and reuse buffer + res := make([]byte, len(buffer)) + copy(res, buffer) + buffer = buffer[:0] + bytesPool.Put(&buffer) + + return res, err + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go similarity index 71% rename from vendor/github.com/Shopify/sarama/delete_groups_request.go rename to vendor/github.com/IBM/sarama/delete_groups_request.go index 4ac8bbee4cb1a..2fdfc33869ef4 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_request.go +++ b/vendor/github.com/IBM/sarama/delete_groups_request.go @@ -1,7 +1,8 @@ package sarama type DeleteGroupsRequest struct { - Groups []string + Version int16 + Groups []string } func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { @@ -18,15 +19,26 @@ func (r *DeleteGroupsRequest) key() int16 { } func (r *DeleteGroupsRequest) version() int16 { - return 0 + return r.Version } func (r *DeleteGroupsRequest) headerVersion() int16 { return 1 } +func (r *DeleteGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { - return V1_1_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_1_0_0 + default: + return V2_0_0_0 + } } func (r *DeleteGroupsRequest) AddGroup(group string) { diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go similarity index 80% rename from vendor/github.com/Shopify/sarama/delete_groups_response.go rename to vendor/github.com/IBM/sarama/delete_groups_response.go index 5e7b1ed3681eb..e490f831468cc 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_response.go +++ b/vendor/github.com/IBM/sarama/delete_groups_response.go @@ -5,6 +5,7 @@ import ( ) type DeleteGroupsResponse struct { + Version int16 ThrottleTime time.Duration GroupErrorCodes map[string]KError } @@ -62,13 +63,28 @@ func (r *DeleteGroupsResponse) key() int16 { } func (r *DeleteGroupsResponse) version() int16 { - return 0 + return r.Version } func (r *DeleteGroupsResponse) headerVersion() int16 { return 0 } +func (r *DeleteGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { - return V1_1_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_1_0_0 + default: + return V2_0_0_0 + } +} + +func (r *DeleteGroupsResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/delete_offsets_request.go rename to vendor/github.com/IBM/sarama/delete_offsets_request.go index 339c7857cac86..06b864d18f2cf 100644 --- a/vendor/github.com/Shopify/sarama/delete_offsets_request.go +++ b/vendor/github.com/IBM/sarama/delete_offsets_request.go @@ -1,6 +1,7 @@ package sarama type DeleteOffsetsRequest struct { + 
Version int16 Group string partitions map[string][]int32 } @@ -72,13 +73,17 @@ func (r *DeleteOffsetsRequest) key() int16 { } func (r *DeleteOffsetsRequest) version() int16 { - return 0 + return r.Version } func (r *DeleteOffsetsRequest) headerVersion() int16 { return 1 } +func (r *DeleteOffsetsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/delete_offsets_response.go rename to vendor/github.com/IBM/sarama/delete_offsets_response.go index d59ae0f8c1c7e..86c6c51f68f06 100644 --- a/vendor/github.com/Shopify/sarama/delete_offsets_response.go +++ b/vendor/github.com/IBM/sarama/delete_offsets_response.go @@ -5,6 +5,7 @@ import ( ) type DeleteOffsetsResponse struct { + Version int16 // The top-level error code, or 0 if there was no error. ErrorCode KError ThrottleTime time.Duration @@ -100,13 +101,21 @@ func (r *DeleteOffsetsResponse) key() int16 { } func (r *DeleteOffsetsResponse) version() int16 { - return 0 + return r.Version } func (r *DeleteOffsetsResponse) headerVersion() int16 { return 0 } +func (r *DeleteOffsetsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *DeleteOffsetsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/delete_records_request.go rename to vendor/github.com/IBM/sarama/delete_records_request.go index dc106b17d62b5..3ca2146afb22f 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_request.go +++ b/vendor/github.com/IBM/sarama/delete_records_request.go @@ -13,6 +13,7 @@ import ( // id(int32) offset(int64) type DeleteRecordsRequest struct { + Version int16 Topics map[string]*DeleteRecordsRequestTopic Timeout time.Duration } @@ -74,15 +75,24 @@ func (d *DeleteRecordsRequest) key() int16 { } func (d *DeleteRecordsRequest) version() int16 { - return 0 + return d.Version } func (d *DeleteRecordsRequest) headerVersion() int16 { return 1 } +func (d *DeleteRecordsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } type DeleteRecordsRequestTopic struct { diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/delete_records_response.go rename to vendor/github.com/IBM/sarama/delete_records_response.go index d530b4c7e912b..2d7db885b162a 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_response.go +++ b/vendor/github.com/IBM/sarama/delete_records_response.go @@ -77,15 +77,28 @@ func (d *DeleteRecordsResponse) key() int16 { } func (d *DeleteRecordsResponse) version() int16 { - return 0 + return d.Version } func (d *DeleteRecordsResponse) headerVersion() int16 { return 0 } +func (d *DeleteRecordsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteRecordsResponse) requiredVersion() 
KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *DeleteRecordsResponse) throttleTime() time.Duration { + return r.ThrottleTime } type DeleteRecordsResponseTopic struct { diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go similarity index 84% rename from vendor/github.com/Shopify/sarama/delete_topics_request.go rename to vendor/github.com/IBM/sarama/delete_topics_request.go index ba6780a8e39cf..252c0d0259461 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_request.go +++ b/vendor/github.com/IBM/sarama/delete_topics_request.go @@ -42,11 +42,21 @@ func (d *DeleteTopicsRequest) headerVersion() int16 { return 1 } +func (d *DeleteTopicsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 3 +} + func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { + case 3: + return V2_1_0_0 + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 - default: + case 0: return V0_10_1_0 + default: + return V2_2_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go similarity index 84% rename from vendor/github.com/Shopify/sarama/delete_topics_response.go rename to vendor/github.com/IBM/sarama/delete_topics_response.go index 733961a89a031..556da689215ff 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_response.go +++ b/vendor/github.com/IBM/sarama/delete_topics_response.go @@ -72,11 +72,25 @@ func (d *DeleteTopicsResponse) headerVersion() int16 { return 0 } +func (d *DeleteTopicsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 3 +} + func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { + case 3: + return V2_1_0_0 + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 - default: + case 0: return V0_10_1_0 + default: + return V2_2_0_0 } } + +func (r *DeleteTopicsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_request.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_request.go index 17a82051c5121..8869145c371c0 100644 --- a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go +++ b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go @@ -11,6 +11,7 @@ package sarama // Components: the components to filter on // Strict: whether the filter only includes specified components type DescribeClientQuotasRequest struct { + Version int16 Components []QuotaFilterComponent Strict bool } @@ -129,13 +130,17 @@ func (d *DescribeClientQuotasRequest) key() int16 { } func (d *DescribeClientQuotasRequest) version() int16 { - return 0 + return d.Version } func (d *DescribeClientQuotasRequest) headerVersion() int16 { return 1 } +func (d *DescribeClientQuotasRequest) isValidVersion() bool { + return d.Version == 0 +} + func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go similarity index 95% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_response.go rename to 
vendor/github.com/IBM/sarama/describe_client_quotas_response.go index 555da0c485d2c..e9bf658adba16 100644 --- a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go +++ b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go @@ -17,6 +17,7 @@ import ( // value => FLOAT64 type DescribeClientQuotasResponse struct { + Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ErrorCode KError // The error code, or `0` if the quota description succeeded. ErrorMsg *string // The error message, or `null` if the quota description succeeded. @@ -223,13 +224,21 @@ func (d *DescribeClientQuotasResponse) key() int16 { } func (d *DescribeClientQuotasResponse) version() int16 { - return 0 + return d.Version } func (d *DescribeClientQuotasResponse) headerVersion() int16 { return 0 } +func (d *DescribeClientQuotasResponse) isValidVersion() bool { + return d.Version == 0 +} + func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } + +func (r *DescribeClientQuotasResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_configs_request.go rename to vendor/github.com/IBM/sarama/describe_configs_request.go index 4c34880318c87..d0ab0d6ef7672 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/IBM/sarama/describe_configs_request.go @@ -103,13 +103,19 @@ func (r *DescribeConfigsRequest) headerVersion() int16 { return 1 } +func (r *DescribeConfigsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V1_1_0_0 case 2: return V2_0_0_0 - default: + case 1: + return V1_1_0_0 + case 0: return V0_11_0_0 + default: + return V2_0_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_configs_response.go rename to vendor/github.com/IBM/sarama/describe_configs_response.go index 4968f4854a622..386a56885a44b 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/IBM/sarama/describe_configs_response.go @@ -34,6 +34,19 @@ const ( SourceDefault ) +type DescribeConfigError struct { + Err KError + ErrMsg string +} + +func (c *DescribeConfigError) Error() string { + text := c.Err.Error() + if c.ErrMsg != "" { + text = fmt.Sprintf("%s - %s", text, c.ErrMsg) + } + return text +} + type DescribeConfigsResponse struct { Version int16 ThrottleTime time.Duration @@ -116,17 +129,27 @@ func (r *DescribeConfigsResponse) headerVersion() int16 { return 0 } +func (r *DescribeConfigsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V1_0_0_0 case 2: return V2_0_0_0 - default: + case 1: + return V1_1_0_0 + case 0: return V0_11_0_0 + default: + return V2_0_0_0 } } +func (r *DescribeConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) { 
pe.putInt16(r.ErrorCode) diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go similarity index 81% rename from vendor/github.com/Shopify/sarama/describe_groups_request.go rename to vendor/github.com/IBM/sarama/describe_groups_request.go index f81f69ac4b8fc..c43262e86df7b 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/IBM/sarama/describe_groups_request.go @@ -42,12 +42,25 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { return 1 } +func (r *DescribeGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 4: + return V2_4_0_0 + case 3: return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 } func (r *DescribeGroupsRequest) AddGroup(group string) { diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_groups_response.go rename to vendor/github.com/IBM/sarama/describe_groups_response.go index 09052e4310c95..dbc46dd089a5e 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/IBM/sarama/describe_groups_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type DescribeGroupsResponse struct { // Version defines the protocol version to use for encode and decode Version int16 @@ -63,12 +65,29 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { return 0 } +func (r *DescribeGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 4: + return V2_4_0_0 + case 3: return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 +} + +func (r *DescribeGroupsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond } // GroupDescription contains each described group. 
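
A pattern repeats across these vendored hunks: every request and response body gains a Version field, an isValidVersion() bounds check, and a requiredVersion() switch with the newest case first, an explicit case 0, and a conservative default; responses additionally gain a throttleTime() accessor. Below is a minimal sketch of that shape, using hypothetical stand-in names (versionedBody, and KafkaVersion as a plain string) rather than sarama's real internal types:

```go
package main

import "fmt"

// KafkaVersion stands in for sarama's broker-version type; the string
// form here is purely illustrative.
type KafkaVersion string

// versionedBody is a simplified mirror of the method set these hunks add
// to each protocol body -- not sarama's actual internal interface.
type versionedBody interface {
	version() int16
	isValidVersion() bool
	requiredVersion() KafkaVersion
}

// describeGroupsRequest is a toy body following the DescribeGroups hunk
// above: the wire version lives in a field rather than being hard-coded.
type describeGroupsRequest struct{ Version int16 }

func (r *describeGroupsRequest) version() int16       { return r.Version }
func (r *describeGroupsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 }

func (r *describeGroupsRequest) requiredVersion() KafkaVersion {
	// Newest case first, oldest last, conservative default -- the same
	// layout the rewritten switches use in place of the old catch-alls.
	switch r.Version {
	case 4:
		return "2.4.0"
	case 3:
		return "2.3.0"
	case 2:
		return "2.0.0"
	case 1:
		return "0.11.0"
	case 0:
		return "0.9.0"
	default:
		return "2.4.0"
	}
}

// checkBody shows what isValidVersion buys callers: an out-of-range
// version is rejected before any bytes are encoded onto the wire.
func checkBody(b versionedBody) error {
	if !b.isValidVersion() {
		return fmt.Errorf("invalid request version %d", b.version())
	}
	return nil
}

func main() {
	req := &describeGroupsRequest{Version: 4}
	if err := checkBody(req); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("v%d requires broker >= %s\n", req.version(), req.requiredVersion())
}
```

Note the effect of the explicit default arm: an unrecognized future version degrades to the newest known mapping instead of falling through to the oldest one, as the replaced catch-all defaults did.
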
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_request.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_request.go index c0bf04e04e278..a6613c3200782 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go +++ b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go @@ -82,6 +82,13 @@ func (r *DescribeLogDirsRequest) headerVersion() int16 { return 1 } +func (r *DescribeLogDirsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + if r.Version > 0 { + return V2_0_0_0 + } return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go similarity index 95% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_response.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_response.go index 411da38ad2045..41b4968dab616 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go +++ b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go @@ -65,10 +65,21 @@ func (r *DescribeLogDirsResponse) headerVersion() int16 { return 0 } +func (r *DescribeLogDirsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + if r.Version > 0 { + return V2_0_0_0 + } return V1_0_0_0 } +func (r *DescribeLogDirsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + type DescribeLogDirsResponseDirMetadata struct { ErrorCode KError diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go index b5b59404bdce7..a6265de5f1a31 100644 --- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go +++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go @@ -65,6 +65,10 @@ func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 { return 2 } +func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go similarity index 95% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go index 2656c2faa1c16..a55c3f0ee5f86 100644 --- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go +++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go @@ -163,6 +163,14 @@ func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 { return 2 } +func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } + +func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration { + 
return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml similarity index 100% rename from vendor/github.com/Shopify/sarama/dev.yml rename to vendor/github.com/IBM/sarama/dev.yml diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml similarity index 62% rename from vendor/github.com/Shopify/sarama/docker-compose.yml rename to vendor/github.com/IBM/sarama/docker-compose.yml index e1119c87fbc08..204768e320357 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,7 +1,8 @@ -version: '3.7' services: zookeeper-1: + hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '1' @@ -12,7 +13,9 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: + hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '2' @@ -23,7 +26,9 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: + hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '3' @@ -34,13 +39,35 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - image: 'sarama/fv-kafka' + hostname: 'kafka-1' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-1:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' @@ -55,14 +82,38 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: - image: 'sarama/fv-kafka' + hostname: 'kafka-2' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . 
dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-2:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' @@ -77,14 +128,38 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: - image: 'sarama/fv-kafka' + hostname: 'kafka-3' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-3:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' @@ -99,14 +174,38 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: - image: 'sarama/fv-kafka' + hostname: 'kafka-4' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . 
dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-4:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' @@ -121,14 +220,38 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: - image: 'sarama/fv-kafka' + hostname: 'kafka-5' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-5:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' @@ -143,8 +266,18 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: + hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' + init: true + healthcheck: + test: ['CMD', '/toxiproxy-cli', 'l'] + interval: 15s + timeout: 15s + retries: 3 + start_period: 30s ports: # The tests themselves actually start the proxies on these ports - '29091:29091' @@ -152,5 +285,6 @@ services: - '29093:29093' - '29094:29094' - '29095:29095' + # This is the toxiproxy API port - '8474:8474' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/encoder_decoder.go rename to vendor/github.com/IBM/sarama/encoder_decoder.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go similarity index 80% rename from vendor/github.com/Shopify/sarama/end_txn_request.go rename to vendor/github.com/IBM/sarama/end_txn_request.go index 6635425ddd621..638099a5d8f27 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_request.go +++ b/vendor/github.com/IBM/sarama/end_txn_request.go @@ -1,6 +1,7 @@ package sarama type EndTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 
ProducerEpoch int16 @@ -42,13 +43,24 @@ func (a *EndTxnRequest) key() int16 { } func (a *EndTxnRequest) version() int16 { - return 0 + return a.Version } func (r *EndTxnRequest) headerVersion() int16 { return 1 } +func (a *EndTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *EndTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go similarity index 71% rename from vendor/github.com/Shopify/sarama/end_txn_response.go rename to vendor/github.com/IBM/sarama/end_txn_response.go index dd2a045048b43..54597df8c7166 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/IBM/sarama/end_txn_response.go @@ -5,6 +5,7 @@ import ( ) type EndTxnResponse struct { + Version int16 ThrottleTime time.Duration Err KError } @@ -36,13 +37,28 @@ func (e *EndTxnResponse) key() int16 { } func (e *EndTxnResponse) version() int16 { - return 0 + return e.Version } func (r *EndTxnResponse) headerVersion() int16 { return 0 } +func (e *EndTxnResponse) isValidVersion() bool { + return e.Version >= 0 && e.Version <= 2 +} + func (e *EndTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch e.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *EndTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh new file mode 100644 index 0000000000000..9fe9a44b1d946 --- /dev/null +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -eu +set -o pipefail + +KAFKA_VERSION="${KAFKA_VERSION:-3.6.0}" +KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" + +if [ ! -d "${KAFKA_HOME}" ]; then + echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME + exit 1 +fi + +cd "${KAFKA_HOME}" || exit 1 + +# discard all empty/commented lines from default config and copy to /tmp +sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties + +echo "########################################################################" >>/tmp/server.properties + +# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka +for var in "${!KAFKA_CFG_@}"; do + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" + sed -e '/^'$key'/d' -i"" /tmp/server.properties + value="${!var}" + echo "$key=$value" >>/tmp/server.properties +done + +sort /tmp/server.properties + +exec bin/kafka-server-start.sh /tmp/server.properties diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go similarity index 69% rename from vendor/github.com/Shopify/sarama/errors.go rename to vendor/github.com/IBM/sarama/errors.go index 27977f1662903..2c431aecb05f0 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -79,7 +79,7 @@ var ErrTransactionNotReady = errors.New("transaction manager: transaction is not // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") -// ErrTransitionNotAllowed when txnmgr state transiion is not valid. 
+// ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. @@ -89,7 +89,7 @@ var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transi var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The -// default implementation is a consensed version of the hashicorp/go-multierror +// default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { @@ -173,98 +173,98 @@ type KError int16 // Numeric error codes returned by the Kafka server. const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 - ErrTopicAlreadyExists KError = 36 - ErrInvalidPartitions KError = 37 - ErrInvalidReplicationFactor KError = 38 - ErrInvalidReplicaAssignment KError = 39 - ErrInvalidConfig KError = 40 - ErrNotController KError = 41 - ErrInvalidRequest KError = 42 - ErrUnsupportedForMessageFormat KError = 43 - ErrPolicyViolation KError = 44 - ErrOutOfOrderSequenceNumber KError = 45 - ErrDuplicateSequenceNumber KError = 46 - ErrInvalidProducerEpoch KError = 47 - ErrInvalidTxnState KError = 48 - ErrInvalidProducerIDMapping KError = 49 - ErrInvalidTransactionTimeout KError = 50 - ErrConcurrentTransactions KError = 51 - ErrTransactionCoordinatorFenced KError = 52 - ErrTransactionalIDAuthorizationFailed KError = 53 - ErrSecurityDisabled KError = 54 - ErrOperationNotAttempted KError = 55 - ErrKafkaStorageError KError = 56 - ErrLogDirNotFound KError = 57 - ErrSASLAuthenticationFailed KError = 58 - ErrUnknownProducerID KError = 59 - ErrReassignmentInProgress KError = 60 - ErrDelegationTokenAuthDisabled KError = 61 - ErrDelegationTokenNotFound KError = 62 - ErrDelegationTokenOwnerMismatch KError = 63 - ErrDelegationTokenRequestNotAllowed KError = 64 - ErrDelegationTokenAuthorizationFailed KError = 65 - ErrDelegationTokenExpired KError = 66 - ErrInvalidPrincipalType KError = 67 - ErrNonEmptyGroup 
KError = 68 - ErrGroupIDNotFound KError = 69 - ErrFetchSessionIDNotFound KError = 70 - ErrInvalidFetchSessionEpoch KError = 71 - ErrListenerNotFound KError = 72 - ErrTopicDeletionDisabled KError = 73 - ErrFencedLeaderEpoch KError = 74 - ErrUnknownLeaderEpoch KError = 75 - ErrUnsupportedCompressionType KError = 76 - ErrStaleBrokerEpoch KError = 77 - ErrOffsetNotAvailable KError = 78 - ErrMemberIdRequired KError = 79 - ErrPreferredLeaderNotAvailable KError = 80 - ErrGroupMaxSizeReached KError = 81 - ErrFencedInstancedId KError = 82 - ErrEligibleLeadersNotAvailable KError = 83 - ErrElectionNotNeeded KError = 84 - ErrNoReassignmentInProgress KError = 85 - ErrGroupSubscribedToTopic KError = 86 - ErrInvalidRecord KError = 87 - ErrUnstableOffsetCommit KError = 88 - ErrThrottlingQuotaExceeded KError = 89 - ErrProducerFenced KError = 90 + ErrUnknown KError = -1 // Errors.UNKNOWN_SERVER_ERROR + ErrNoError KError = 0 // Errors.NONE + ErrOffsetOutOfRange KError = 1 // Errors.OFFSET_OUT_OF_RANGE + ErrInvalidMessage KError = 2 // Errors.CORRUPT_MESSAGE + ErrUnknownTopicOrPartition KError = 3 // Errors.UNKNOWN_TOPIC_OR_PARTITION + ErrInvalidMessageSize KError = 4 // Errors.INVALID_FETCH_SIZE + ErrLeaderNotAvailable KError = 5 // Errors.LEADER_NOT_AVAILABLE + ErrNotLeaderForPartition KError = 6 // Errors.NOT_LEADER_OR_FOLLOWER + ErrRequestTimedOut KError = 7 // Errors.REQUEST_TIMED_OUT + ErrBrokerNotAvailable KError = 8 // Errors.BROKER_NOT_AVAILABLE + ErrReplicaNotAvailable KError = 9 // Errors.REPLICA_NOT_AVAILABLE + ErrMessageSizeTooLarge KError = 10 // Errors.MESSAGE_TOO_LARGE + ErrStaleControllerEpochCode KError = 11 // Errors.STALE_CONTROLLER_EPOCH + ErrOffsetMetadataTooLarge KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE + ErrNetworkException KError = 13 // Errors.NETWORK_EXCEPTION + ErrOffsetsLoadInProgress KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS + ErrConsumerCoordinatorNotAvailable KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE + ErrNotCoordinatorForConsumer KError = 16 // Errors.NOT_COORDINATOR + ErrInvalidTopic KError = 17 // Errors.INVALID_TOPIC_EXCEPTION + ErrMessageSetSizeTooLarge KError = 18 // Errors.RECORD_LIST_TOO_LARGE + ErrNotEnoughReplicas KError = 19 // Errors.NOT_ENOUGH_REPLICAS + ErrNotEnoughReplicasAfterAppend KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND + ErrInvalidRequiredAcks KError = 21 // Errors.INVALID_REQUIRED_ACKS + ErrIllegalGeneration KError = 22 // Errors.ILLEGAL_GENERATION + ErrInconsistentGroupProtocol KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL + ErrInvalidGroupId KError = 24 // Errors.INVALID_GROUP_ID + ErrUnknownMemberId KError = 25 // Errors.UNKNOWN_MEMBER_ID + ErrInvalidSessionTimeout KError = 26 // Errors.INVALID_SESSION_TIMEOUT + ErrRebalanceInProgress KError = 27 // Errors.REBALANCE_IN_PROGRESS + ErrInvalidCommitOffsetSize KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE + ErrTopicAuthorizationFailed KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED + ErrGroupAuthorizationFailed KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED + ErrClusterAuthorizationFailed KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED + ErrInvalidTimestamp KError = 32 // Errors.INVALID_TIMESTAMP + ErrUnsupportedSASLMechanism KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM + ErrIllegalSASLState KError = 34 // Errors.ILLEGAL_SASL_STATE + ErrUnsupportedVersion KError = 35 // Errors.UNSUPPORTED_VERSION + ErrTopicAlreadyExists KError = 36 // Errors.TOPIC_ALREADY_EXISTS + ErrInvalidPartitions KError = 37 // Errors.INVALID_PARTITIONS + ErrInvalidReplicationFactor 
KError = 38 // Errors.INVALID_REPLICATION_FACTOR + ErrInvalidReplicaAssignment KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT + ErrInvalidConfig KError = 40 // Errors.INVALID_CONFIG + ErrNotController KError = 41 // Errors.NOT_CONTROLLER + ErrInvalidRequest KError = 42 // Errors.INVALID_REQUEST + ErrUnsupportedForMessageFormat KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT + ErrPolicyViolation KError = 44 // Errors.POLICY_VIOLATION + ErrOutOfOrderSequenceNumber KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER + ErrDuplicateSequenceNumber KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER + ErrInvalidProducerEpoch KError = 47 // Errors.INVALID_PRODUCER_EPOCH + ErrInvalidTxnState KError = 48 // Errors.INVALID_TXN_STATE + ErrInvalidProducerIDMapping KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING + ErrInvalidTransactionTimeout KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT + ErrConcurrentTransactions KError = 51 // Errors.CONCURRENT_TRANSACTIONS + ErrTransactionCoordinatorFenced KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED + ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED + ErrSecurityDisabled KError = 54 // Errors.SECURITY_DISABLED + ErrOperationNotAttempted KError = 55 // Errors.OPERATION_NOT_ATTEMPTED + ErrKafkaStorageError KError = 56 // Errors.KAFKA_STORAGE_ERROR + ErrLogDirNotFound KError = 57 // Errors.LOG_DIR_NOT_FOUND + ErrSASLAuthenticationFailed KError = 58 // Errors.SASL_AUTHENTICATION_FAILED + ErrUnknownProducerID KError = 59 // Errors.UNKNOWN_PRODUCER_ID + ErrReassignmentInProgress KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS + ErrDelegationTokenAuthDisabled KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED + ErrDelegationTokenNotFound KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND + ErrDelegationTokenOwnerMismatch KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH + ErrDelegationTokenRequestNotAllowed KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED + ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED + ErrDelegationTokenExpired KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED + ErrInvalidPrincipalType KError = 67 // Errors.INVALID_PRINCIPAL_TYPE + ErrNonEmptyGroup KError = 68 // Errors.NON_EMPTY_GROUP + ErrGroupIDNotFound KError = 69 // Errors.GROUP_ID_NOT_FOUND + ErrFetchSessionIDNotFound KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND + ErrInvalidFetchSessionEpoch KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH + ErrListenerNotFound KError = 72 // Errors.LISTENER_NOT_FOUND + ErrTopicDeletionDisabled KError = 73 // Errors.TOPIC_DELETION_DISABLED + ErrFencedLeaderEpoch KError = 74 // Errors.FENCED_LEADER_EPOCH + ErrUnknownLeaderEpoch KError = 75 // Errors.UNKNOWN_LEADER_EPOCH + ErrUnsupportedCompressionType KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE + ErrStaleBrokerEpoch KError = 77 // Errors.STALE_BROKER_EPOCH + ErrOffsetNotAvailable KError = 78 // Errors.OFFSET_NOT_AVAILABLE + ErrMemberIdRequired KError = 79 // Errors.MEMBER_ID_REQUIRED + ErrPreferredLeaderNotAvailable KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE + ErrGroupMaxSizeReached KError = 81 // Errors.GROUP_MAX_SIZE_REACHED + ErrFencedInstancedId KError = 82 // Errors.FENCED_INSTANCE_ID + ErrEligibleLeadersNotAvailable KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE + ErrElectionNotNeeded KError = 84 // Errors.ELECTION_NOT_NEEDED + ErrNoReassignmentInProgress KError = 85 // Errors.NO_REASSIGNMENT_IN_PROGRESS + ErrGroupSubscribedToTopic KError = 86 // 
Errors.GROUP_SUBSCRIBED_TO_TOPIC + ErrInvalidRecord KError = 87 // Errors.INVALID_RECORD + ErrUnstableOffsetCommit KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT + ErrThrottlingQuotaExceeded KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED + ErrProducerFenced KError = 90 // Errors.PRODUCER_FENCED ) func (err KError) Error() string { @@ -302,7 +302,7 @@ func (err KError) Error() string { case ErrNetworkException: return "kafka server: The server disconnected before a response was received" case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition" + return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: return "kafka server: Offset's topic has not yet been created" case ErrNotCoordinatorForConsumer: diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/fetch_request.go rename to vendor/github.com/IBM/sarama/fetch_request.go index 26adead4e24a0..a5314b55c818d 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/IBM/sarama/fetch_request.go @@ -1,5 +1,7 @@ package sarama +import "fmt" + type fetchRequestBlock struct { Version int16 // currentLeaderEpoch contains the current leader epoch of the partition. @@ -241,6 +243,9 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { if err != nil { return err } + if partitionCount < 0 { + return fmt.Errorf("partitionCount %d is invalid", partitionCount) + } r.forgotten[topic] = make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { @@ -275,30 +280,34 @@ func (r *FetchRequest) headerVersion() int16 { return 1 } +func (r *FetchRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 11 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return MinVersion - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4, 5: - return V0_11_0_0 - case 6: - return V1_0_0_0 - case 7: - return V1_1_0_0 - case 8: - return V2_0_0_0 - case 9, 10: - return V2_1_0_0 case 11: return V2_3_0_0 + case 9, 10: + return V2_1_0_0 + case 8: + return V2_0_0_0 + case 7: + return V1_1_0_0 + case 6: + return V1_0_0_0 + case 4, 5: + return V0_11_0_0 + case 3: + return V0_10_1_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MaxVersion + return V2_3_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go similarity index 98% rename from vendor/github.com/Shopify/sarama/fetch_response.go rename to vendor/github.com/IBM/sarama/fetch_response.go index 3d449c85e2350..02e8ca4736ab4 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/IBM/sarama/fetch_response.go @@ -386,33 +386,41 @@ func (r *FetchResponse) headerVersion() int16 { return 0 } +func (r *FetchResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 11 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return MinVersion - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4, 5: - return V0_11_0_0 - case 6: - return V1_0_0_0 - case 7: - return V1_1_0_0 - case 8: - return V2_0_0_0 - case 9, 10: - return V2_1_0_0 case 11: return 
V2_3_0_0 + case 9, 10: + return V2_1_0_0 + case 8: + return V2_0_0_0 + case 7: + return V1_1_0_0 + case 6: + return V1_0_0_0 + case 4, 5: + return V0_11_0_0 + case 3: + return V0_10_1_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MaxVersion + return V2_3_0_0 } } +func (r *FetchResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { return nil diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/find_coordinator_request.go rename to vendor/github.com/IBM/sarama/find_coordinator_request.go index 597bcbf786f55..4758835a1ce4e 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go +++ b/vendor/github.com/IBM/sarama/find_coordinator_request.go @@ -55,8 +55,14 @@ func (r *FindCoordinatorRequest) headerVersion() int16 { return 1 } +func (f *FindCoordinatorRequest) isValidVersion() bool { + return f.Version >= 0 && f.Version <= 2 +} + func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 default: diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go similarity index 89% rename from vendor/github.com/Shopify/sarama/find_coordinator_response.go rename to vendor/github.com/IBM/sarama/find_coordinator_response.go index 83a648ad4aec3..11b9920d02b98 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go +++ b/vendor/github.com/IBM/sarama/find_coordinator_response.go @@ -86,11 +86,21 @@ func (r *FindCoordinatorResponse) headerVersion() int16 { return 0 } +func (f *FindCoordinatorResponse) isValidVersion() bool { + return f.Version >= 0 && f.Version <= 2 +} + func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { switch f.Version { + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } + +func (r *FindCoordinatorResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go similarity index 94% rename from vendor/github.com/Shopify/sarama/gssapi_kerberos.go rename to vendor/github.com/IBM/sarama/gssapi_kerberos.go index ab8b70196f823..ccc01c19bd739 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go @@ -23,6 +23,7 @@ const ( GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 + KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 @@ -31,12 +32,14 @@ const ( type GSSAPIConfig struct { AuthType int KeyTabPath string + CCachePath string KerberosConfigPath string ServiceName string Username string Password string Realm string DisablePAFXFAST bool + BuildSpn BuildSpnFunc } type GSSAPIKerberosAuth struct { @@ -55,6 +58,8 @@ type KerberosClient interface { Destroy() } +type BuildSpnFunc func(serviceName, host string) string + // writePackage appends length in big endian before the payload, and sends it to kafka func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { length := uint64(len(payload)) @@ -209,10 +214,15 @@ func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { return 
err } // Construct SPN using serviceName and host - // SPN format: <SERVICE>/<FQDN> + // default SPN format: <SERVICE>/<FQDN> host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part - spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + var spn string + if krbAuth.Config.BuildSpn != nil { + spn = krbAuth.Config.BuildSpn(broker.conf.Net.SASL.GSSAPI.ServiceName, host) + } else { + spn = fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + } ticket, encKey, err := kerberosClient.GetServiceTicket(spn) if err != nil { diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go similarity index 83% rename from vendor/github.com/Shopify/sarama/heartbeat_request.go rename to vendor/github.com/IBM/sarama/heartbeat_request.go index 511910e712e1d..9f740f26c67d2 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/IBM/sarama/heartbeat_request.go @@ -60,10 +60,21 @@ func (r *HeartbeatRequest) headerVersion() int16 { return 1 } +func (r *HeartbeatRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *HeartbeatRequest) requiredVersion() KafkaVersion { - switch { - case r.Version >= 3: + switch r.Version { + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_8_2_0 + default: return V2_3_0_0 } - return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go similarity index 71% rename from vendor/github.com/Shopify/sarama/heartbeat_response.go rename to vendor/github.com/IBM/sarama/heartbeat_response.go index 95ef97f47a471..a58718d7b56b3 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/IBM/sarama/heartbeat_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type HeartbeatResponse struct { Version int16 ThrottleTime int32 @@ -43,10 +45,25 @@ func (r *HeartbeatResponse) headerVersion() int16 { return 0 } +func (r *HeartbeatResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_8_2_0 + default: return V2_3_0_0 } - return V0_9_0_0 +} + +func (r *HeartbeatResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_request.go index c4d05a972041e..b1b490a282a7b 100644 --- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go +++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go @@ -11,6 +11,7 @@ const ( // IncrementalAlterConfigsRequest is an incremental alter config request type type IncrementalAlterConfigsRequest struct { + Version int16 Resources []*IncrementalAlterConfigsResource ValidateOnly bool } @@ -161,13 +162,17 @@ func (a *IncrementalAlterConfigsRequest) key() int16 { } func (a *IncrementalAlterConfigsRequest) version() int16 { - return 0 + return a.Version } func (a *IncrementalAlterConfigsRequest) headerVersion() int16 { return 1 } +func (a
*IncrementalAlterConfigsRequest) isValidVersion() bool { + return a.Version == 0 +} + func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion { return V2_3_0_0 } diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_response.go index 3e8c4500c324a..3a2df2f606d6b 100644 --- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go +++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go @@ -4,6 +4,7 @@ import "time" // IncrementalAlterConfigsResponse is a response type for incremental alter config type IncrementalAlterConfigsResponse struct { + Version int16 ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } @@ -54,13 +55,21 @@ func (a *IncrementalAlterConfigsResponse) key() int16 { } func (a *IncrementalAlterConfigsResponse) version() int16 { - return 0 + return a.Version } func (a *IncrementalAlterConfigsResponse) headerVersion() int16 { return 0 } +func (a *IncrementalAlterConfigsResponse) isValidVersion() bool { + return a.Version == 0 +} + func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion { return V2_3_0_0 } + +func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go similarity index 91% rename from vendor/github.com/Shopify/sarama/init_producer_id_request.go rename to vendor/github.com/IBM/sarama/init_producer_id_request.go index 33ce5fa41c890..dee50fb9fcccd 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/IBM/sarama/init_producer_id_request.go @@ -84,19 +84,23 @@ func (i *InitProducerIDRequest) headerVersion() int16 { return 1 } +func (i *InitProducerIDRequest) isValidVersion() bool { + return i.Version >= 0 && i.Version <= 4 +} + func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { switch i.Version { - case 2: - // Added tagged fields - return V2_4_0_0 + case 4: + return V2_7_0_0 case 3: - // Added ProducerID/Epoch return V2_5_0_0 - case 0: - fallthrough + case 2: + return V2_4_0_0 case 1: - fallthrough - default: + return V2_0_0_0 + case 0: return V0_11_0_0 + default: + return V2_7_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go similarity index 85% rename from vendor/github.com/Shopify/sarama/init_producer_id_response.go rename to vendor/github.com/IBM/sarama/init_producer_id_response.go index 0060701899833..256077189e883 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/IBM/sarama/init_producer_id_response.go @@ -69,17 +69,25 @@ func (i *InitProducerIDResponse) headerVersion() int16 { return 0 } +func (i *InitProducerIDResponse) isValidVersion() bool { + return i.Version >= 0 && i.Version <= 4 +} + func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { switch i.Version { - case 2: - fallthrough + case 4: + return V2_7_0_0 case 3: + return V2_5_0_0 + case 2: return V2_4_0_0 - case 0: - fallthrough case 1: - fallthrough + return V2_0_0_0 default: return V0_11_0_0 } } + +func (r *InitProducerIDResponse) throttleTime() time.Duration { + return 
r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go similarity index 89% rename from vendor/github.com/Shopify/sarama/interceptors.go rename to vendor/github.com/IBM/sarama/interceptors.go index d0d33e526f8e0..d4dc23cc4c1b3 100644 --- a/vendor/github.com/Shopify/sarama/interceptors.go +++ b/vendor/github.com/IBM/sarama/interceptors.go @@ -25,7 +25,7 @@ type ConsumerInterceptor interface { func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) { defer func() { if r := recover(); r != nil { - Logger.Printf("Error when calling producer interceptor: %s, %w\n", interceptor, r) + Logger.Printf("Error when calling producer interceptor: %v, %v", interceptor, r) } }() @@ -35,7 +35,7 @@ func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerIntercept func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) { defer func() { if r := recover(); r != nil { - Logger.Printf("Error when calling consumer interceptor: %s, %w\n", interceptor, r) + Logger.Printf("Error when calling consumer interceptor: %v, %v", interceptor, r) } }() diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go similarity index 70% rename from vendor/github.com/Shopify/sarama/join_group_request.go rename to vendor/github.com/IBM/sarama/join_group_request.go index 432338cd59764..3ab69c4984dc5 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/IBM/sarama/join_group_request.go @@ -1,7 +1,9 @@ package sarama type GroupProtocol struct { - Name string + // Name contains the protocol name. + Name string + // Metadata contains the protocol metadata. Metadata []byte } @@ -25,14 +27,30 @@ func (p *GroupProtocol) encode(pe packetEncoder) (err error) { } type JoinGroupRequest struct { - Version int16 - GroupId string - SessionTimeout int32 - RebalanceTimeout int32 - MemberId string - GroupInstanceId *string - ProtocolType string - GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + // Version defines the protocol version to use for encode and decode + Version int16 + // GroupId contains the group identifier. + GroupId string + // SessionTimeout specifies that the coordinator should consider the consumer + // dead if it receives no heartbeat after this timeout in milliseconds. + SessionTimeout int32 + // RebalanceTimeout contains the maximum time in milliseconds that the + // coordinator will wait for each member to rejoin when rebalancing the + // group. + RebalanceTimeout int32 + // MemberId contains the member id assigned by the group coordinator. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. + GroupInstanceId *string + // ProtocolType contains the unique name the for class of protocols + // implemented by the group we want to join. + ProtocolType string + // GroupProtocols contains the list of protocols that the member supports. + // deprecated; use OrderedGroupProtocols + GroupProtocols map[string][]byte + // OrderedGroupProtocols contains an ordered list of protocols that the member + // supports. 
OrderedGroupProtocols []*GroupProtocol } @@ -150,16 +168,26 @@ func (r *JoinGroupRequest) headerVersion() int16 { return 1 } +func (r *JoinGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 5 +} + func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 4, 5: + case 5: return V2_3_0_0 - case 2, 3: + case 4: + return V2_2_0_0 + case 3: + return V2_0_0_0 + case 2: return V0_11_0_0 case 1: return V0_10_1_0 + case 0: + return V0_10_0_0 default: - return V0_9_0_0 + return V2_3_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go similarity index 68% rename from vendor/github.com/Shopify/sarama/join_group_response.go rename to vendor/github.com/IBM/sarama/join_group_response.go index d8aa1f0023d4b..643fddc6b57a0 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/IBM/sarama/join_group_response.go @@ -1,20 +1,35 @@ package sarama +import "time" + type JoinGroupResponse struct { - Version int16 - ThrottleTime int32 - Err KError - GenerationId int32 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration for which the request was throttled due + // to a quota violation, or zero if the request did not violate any quota. + ThrottleTime int32 + // Err contains the error code, or 0 if there was no error. + Err KError + // GenerationId contains the generation ID of the group. + GenerationId int32 + // GroupProtocol contains the group protocol selected by the coordinator. GroupProtocol string - LeaderId string - MemberId string - Members []GroupMember + // LeaderId contains the leader of the group. + LeaderId string + // MemberId contains the member ID assigned by the group coordinator. + MemberId string + // Members contains the per-group-member information. + Members []GroupMember } type GroupMember struct { - MemberId string + // MemberId contains the group member ID. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. GroupInstanceId *string - Metadata []byte + // Metadata contains the group member metadata. 
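The rewritten requiredVersion switches above (HeartbeatRequest/Response, IncrementalAlterConfigs, InitProducerID, JoinGroup) all follow one pattern: an exact protocol-version-to-broker-version table paired with a new isValidVersion bounds check, with unknown future versions now falling back to the newest mapping instead of the oldest. A minimal self-contained sketch of that contract; the types below are illustrative stand-ins, not the vendored ones:

package main

import "fmt"

// kafkaVersion is an illustrative stand-in for sarama's KafkaVersion.
type kafkaVersion string

const (
	v0_10_1_0 kafkaVersion = "0.10.1.0"
	v0_11_0_0 kafkaVersion = "0.11.0.0"
	v2_3_0_0  kafkaVersion = "2.3.0.0"
)

type joinGroupRequest struct{ Version int16 }

// isValidVersion rejects protocol versions the codec cannot encode at all.
func (r joinGroupRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 5 }

// requiredVersion maps each protocol version to the minimum broker release
// that supports it; an unknown (future) version falls back to the newest
// mapping, matching the rewritten switches above.
func (r joinGroupRequest) requiredVersion() kafkaVersion {
	switch r.Version {
	case 5, 4, 3:
		return v2_3_0_0 // simplified: the real table distinguishes versions 3 and 4
	case 2:
		return v0_11_0_0
	case 1:
		return v0_10_1_0
	default:
		return v2_3_0_0
	}
}

func main() {
	r := joinGroupRequest{Version: 2}
	fmt.Println(r.isValidVersion(), r.requiredVersion()) // true 0.11.0.0
}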
+ Metadata []byte } func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { @@ -145,15 +160,29 @@ func (r *JoinGroupResponse) headerVersion() int16 { return 0 } +func (r *JoinGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 5 +} + func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 3, 4, 5: + case 5: return V2_3_0_0 + case 4: + return V2_2_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 + case 0: + return V0_10_0_0 default: - return V0_9_0_0 + return V2_3_0_0 } } + +func (r *JoinGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go similarity index 79% rename from vendor/github.com/Shopify/sarama/kerberos_client.go rename to vendor/github.com/IBM/sarama/kerberos_client.go index 01a53193bb95c..289126879b7fe 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/IBM/sarama/kerberos_client.go @@ -3,6 +3,7 @@ package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) @@ -32,13 +33,23 @@ func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client - if config.AuthType == KRB5_KEYTAB_AUTH { + switch config.AuthType { + case KRB5_KEYTAB_AUTH: kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) - } else { + case KRB5_CCACHE_AUTH: + cc, err := credentials.LoadCCache(config.CCachePath) + if err != nil { + return nil, err + } + client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + if err != nil { + return nil, err + } + default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go similarity index 88% rename from vendor/github.com/Shopify/sarama/leave_group_request.go rename to vendor/github.com/IBM/sarama/leave_group_request.go index 741b7290a8dff..9222e51049496 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/IBM/sarama/leave_group_request.go @@ -81,10 +81,21 @@ func (r *LeaveGroupRequest) headerVersion() int16 { return 1 } +func (r *LeaveGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: - return V2_3_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go similarity index 83% rename from vendor/github.com/Shopify/sarama/leave_group_response.go rename to vendor/github.com/IBM/sarama/leave_group_response.go index 18ed357e830b1..f24c24867e94e 
100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/IBM/sarama/leave_group_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type MemberResponse struct { MemberId string GroupInstanceId *string @@ -83,10 +85,25 @@ func (r *LeaveGroupResponse) headerVersion() int16 { return 0 } +func (r *LeaveGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: - return V2_3_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 +} + +func (r *LeaveGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/length_field.go rename to vendor/github.com/IBM/sarama/length_field.go diff --git a/vendor/github.com/IBM/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go new file mode 100644 index 0000000000000..4d5f9e40d1581 --- /dev/null +++ b/vendor/github.com/IBM/sarama/list_groups_request.go @@ -0,0 +1,82 @@ +package sarama + +type ListGroupsRequest struct { + Version int16 + StatesFilter []string // version 4 or later +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + if r.Version >= 4 { + pe.putCompactArrayLength(len(r.StatesFilter)) + for _, filter := range r.StatesFilter { + err := pe.putCompactString(filter) + if err != nil { + return err + } + } + } + if r.Version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 4 { + filterLen, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if filterLen > 0 { + r.StatesFilter = make([]string, filterLen) + for i := 0; i < filterLen; i++ { + if r.StatesFilter[i], err = pd.getCompactString(); err != nil { + return err + } + } + } + } + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return r.Version +} + +func (r *ListGroupsRequest) headerVersion() int16 { + if r.Version >= 3 { + return 2 + } + return 1 +} + +func (r *ListGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 4: + return V2_6_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_6_0_0 + } +} diff --git a/vendor/github.com/IBM/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go new file mode 100644 index 0000000000000..62948c31fc571 --- /dev/null +++ b/vendor/github.com/IBM/sarama/list_groups_response.go @@ -0,0 +1,173 @@ +package sarama + +type ListGroupsResponse struct { + Version int16 + ThrottleTime int32 + Err KError + Groups map[string]string + GroupsData map[string]GroupData // version 4 or later +} + +type GroupData struct { + GroupState string // version 4 or later +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + 
pe.putInt32(r.ThrottleTime) + } + + pe.putInt16(int16(r.Err)) + + if r.Version <= 2 { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + } else { + pe.putCompactArrayLength(len(r.Groups)) + for groupId, protocolType := range r.Groups { + if err := pe.putCompactString(groupId); err != nil { + return err + } + if err := pe.putCompactString(protocolType); err != nil { + return err + } + + if r.Version >= 4 { + groupData := r.GroupsData[groupId] + if err := pe.putCompactString(groupData.GroupState); err != nil { + return err + } + } + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + r.Version = version + if r.Version >= 1 { + var err error + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + var n int + if r.Version <= 2 { + n, err = pd.getArrayLength() + } else { + n, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == 0 { + r.Groups = make(map[string]string) + if r.Version >= 4 { + r.GroupsData = make(map[string]GroupData) + } + } + + var groupId, protocolType string + if r.Version <= 2 { + groupId, err = pd.getString() + if err != nil { + return err + } + protocolType, err = pd.getString() + if err != nil { + return err + } + } else { + groupId, err = pd.getCompactString() + if err != nil { + return err + } + protocolType, err = pd.getCompactString() + if err != nil { + return err + } + } + + r.Groups[groupId] = protocolType + + if r.Version >= 4 { + groupState, err := pd.getCompactString() + if err != nil { + return err + } + r.GroupsData[groupId] = GroupData{ + GroupState: groupState, + } + } + + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return r.Version +} + +func (r *ListGroupsResponse) headerVersion() int16 { + if r.Version >= 3 { + return 1 + } + return 0 +} + +func (r *ListGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 4: + return V2_6_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_6_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_request.go index c1ffa9ba02b71..c7ad5e9814553 100644 --- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go +++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go @@ -83,6 +83,10 @@ func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { return 2 } +func (r *ListPartitionReassignmentsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r 
*ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_response.go index 4baa6a08e83e6..426f1c77154f2 100644 --- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go +++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type PartitionReplicaReassignmentsStatus struct { Replicas []int32 AddingReplicas []int32 @@ -164,6 +166,14 @@ func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { return 1 } +func (r *ListPartitionReassignmentsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/IBM/sarama/message.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message.go rename to vendor/github.com/IBM/sarama/message.go diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message_set.go rename to vendor/github.com/IBM/sarama/message_set.go diff --git a/vendor/github.com/IBM/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go new file mode 100644 index 0000000000000..e76073ea0d519 --- /dev/null +++ b/vendor/github.com/IBM/sarama/metadata_request.go @@ -0,0 +1,240 @@ +package sarama + +import "encoding/base64" + +type Uuid [16]byte + +func (u Uuid) String() string { + return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:]) +} + +var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +type MetadataRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // Topics contains the topics to fetch metadata for. + Topics []string + // AllowAutoTopicCreation contains a If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. 
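The kerberos_client.go change above adds a third authentication path, loading credentials from a Kerberos ticket cache (KRB5_CCACHE_AUTH) next to the existing keytab and password flows. A sketch of the client configuration that exercises it, assuming the vendored sarama API; the broker address, realm, and file paths are placeholders:

package main

import "github.com/IBM/sarama"

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	// Reuse an existing ticket cache (e.g. one produced by kinit)
	// instead of a keytab or password.
	cfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_CCACHE_AUTH
	cfg.Net.SASL.GSSAPI.CCachePath = "/tmp/krb5cc_1000"
	cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
	cfg.Net.SASL.GSSAPI.ServiceName = "kafka"
	cfg.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"

	client, err := sarama.NewClient([]string{"broker-1:9093"}, cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()
}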
+ AllowAutoTopicCreation bool + IncludeClusterAuthorizedOperations bool // version 8 and up + IncludeTopicAuthorizedOperations bool // version 8 and up +} + +func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest { + m := &MetadataRequest{Topics: topics} + if version.IsAtLeast(V2_8_0_0) { + m.Version = 10 + } else if version.IsAtLeast(V2_4_0_0) { + m.Version = 9 + } else if version.IsAtLeast(V2_4_0_0) { + m.Version = 8 + } else if version.IsAtLeast(V2_1_0_0) { + m.Version = 7 + } else if version.IsAtLeast(V2_0_0_0) { + m.Version = 6 + } else if version.IsAtLeast(V1_0_0_0) { + m.Version = 5 + } else if version.IsAtLeast(V0_11_0_0) { + m.Version = 4 + } else if version.IsAtLeast(V0_10_1_0) { + m.Version = 2 + } else if version.IsAtLeast(V0_10_0_0) { + m.Version = 1 + } + return m +} + +func (r *MetadataRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 10 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} + } + if r.Version == 0 || len(r.Topics) > 0 { + if r.Version < 9 { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else if r.Version == 9 { + pe.putCompactArrayLength(len(r.Topics)) + for _, topicName := range r.Topics { + if err := pe.putCompactString(topicName); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + } else { // r.Version = 10 + pe.putCompactArrayLength(len(r.Topics)) + for _, topicName := range r.Topics { + if err := pe.putRawBytes(NullUUID); err != nil { + return err + } + // Avoid implicit memory aliasing in for loop + tn := topicName + if err := pe.putNullableCompactString(&tn); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + } + } else { + if r.Version < 9 { + pe.putInt32(-1) + } else { + pe.putCompactArrayLength(-1) + } + } + + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) + } + if r.Version > 7 { + pe.putBool(r.IncludeClusterAuthorizedOperations) + pe.putBool(r.IncludeTopicAuthorizedOperations) + } + if r.Version > 8 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version < 9 { + size, err := pd.getInt32() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + } + } else if r.Version == 9 { + size, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + } + for i := range r.Topics { + topic, err := pd.getCompactString() + if err != nil { + return err + } + r.Topics[i] = topic + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } else { // version 10+ + size, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if size > 0 { + r.Topics = make([]string, size) + } + for i := range r.Topics { + if _, err = pd.getRawBytes(16); err != nil { // skip UUID + return err + } + topic, err := pd.getCompactNullableString() + if err != nil { + return err + } + if topic != nil { + r.Topics[i] = *topic + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if r.Version >= 4 { + if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil { + return err + } + } + + if r.Version > 7 { + 
includeClusterAuthz, err := pd.getBool() + if err != nil { + return err + } + r.IncludeClusterAuthorizedOperations = includeClusterAuthz + includeTopicAuthz, err := pd.getBool() + if err != nil { + return err + } + r.IncludeTopicAuthorizedOperations = includeTopicAuthz + } + if r.Version > 8 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return r.Version +} + +func (r *MetadataRequest) headerVersion() int16 { + if r.Version >= 9 { + return 2 + } + return 1 +} + +func (r *MetadataRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 10 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 10: + return V2_8_0_0 + case 9: + return V2_4_0_0 + case 8: + return V2_3_0_0 + case 7: + return V2_1_0_0 + case 6: + return V2_0_0_0 + case 5: + return V1_0_0_0 + case 3, 4: + return V0_11_0_0 + case 2: + return V0_10_1_0 + case 1: + return V0_10_0_0 + case 0: + return V0_8_2_0 + default: + return V2_8_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go similarity index 56% rename from vendor/github.com/Shopify/sarama/metadata_response.go rename to vendor/github.com/IBM/sarama/metadata_response.go index 10a56877de110..dfb5d3a5bd51d 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/IBM/sarama/metadata_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + // PartitionMetadata contains each partition in the topic. type PartitionMetadata struct { // Version defines the protocol version to use for encode and decode @@ -42,16 +44,38 @@ func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) } } - if p.Replicas, err = pd.getInt32Array(); err != nil { + if p.Version < 9 { + p.Replicas, err = pd.getInt32Array() + } else { + p.Replicas, err = pd.getCompactInt32Array() + } + if err != nil { return err } - if p.Isr, err = pd.getInt32Array(); err != nil { + if p.Version < 9 { + p.Isr, err = pd.getInt32Array() + } else { + p.Isr, err = pd.getCompactInt32Array() + } + if err != nil { return err } if p.Version >= 5 { - if p.OfflineReplicas, err = pd.getInt32Array(); err != nil { + if p.Version < 9 { + p.OfflineReplicas, err = pd.getInt32Array() + } else { + p.OfflineReplicas, err = pd.getCompactInt32Array() + } + if err != nil { + return err + } + } + + if p.Version >= 9 { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { return err } } @@ -71,20 +95,39 @@ func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) pe.putInt32(p.LeaderEpoch) } - if err := pe.putInt32Array(p.Replicas); err != nil { + if p.Version < 9 { + err = pe.putInt32Array(p.Replicas) + } else { + err = pe.putCompactInt32Array(p.Replicas) + } + if err != nil { return err } - if err := pe.putInt32Array(p.Isr); err != nil { + if p.Version < 9 { + err = pe.putInt32Array(p.Isr) + } else { + err = pe.putCompactInt32Array(p.Isr) + } + if err != nil { return err } if p.Version >= 5 { - if err := pe.putInt32Array(p.OfflineReplicas); err != nil { + if p.Version < 9 { + err = pe.putInt32Array(p.OfflineReplicas) + } else { + err = pe.putCompactInt32Array(p.OfflineReplicas) + } + if err != nil { return err } } + if p.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -96,10 +139,12 @@ type TopicMetadata struct { Err KError // Name contains the topic name. 
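metadata_request.go above introduces the Uuid type for KIP-516 topic IDs, and TopicMetadata picks it up in the Uuid field added just below. A standalone sketch of the rendering the new String method performs (unpadded URL-safe base64, the same form Kafka tooling prints):

package main

import (
	"encoding/base64"
	"fmt"
)

type uuid [16]byte

// Kafka renders topic IDs as URL-safe base64 without padding; the vendored
// Uuid.String does exactly this.
func (u uuid) String() string {
	return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:])
}

func main() {
	var null uuid         // the all-zero "null" UUID sent for topics without an ID
	fmt.Println(null)     // AAAAAAAAAAAAAAAAAAAAAA (22 characters)
}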
Name string + Uuid Uuid // IsInternal contains a True if the topic is internal. IsInternal bool // Partitions contains each partition in the topic. - Partitions []*PartitionMetadata + Partitions []*PartitionMetadata + TopicAuthorizedOperations int32 // Only valid for Version >= 8 } func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { @@ -110,21 +155,44 @@ func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { } t.Err = KError(tmp) - if t.Name, err = pd.getString(); err != nil { + if t.Version < 9 { + t.Name, err = pd.getString() + } else { + t.Name, err = pd.getCompactString() + } + if err != nil { return err } + if t.Version >= 10 { + uuid, err := pd.getRawBytes(16) + if err != nil { + return err + } + t.Uuid = [16]byte{} + for i := 0; i < 16; i++ { + t.Uuid[i] = uuid[i] + } + } + if t.Version >= 1 { - if t.IsInternal, err = pd.getBool(); err != nil { + t.IsInternal, err = pd.getBool() + if err != nil { return err } } - if numPartitions, err := pd.getArrayLength(); err != nil { + var n int + if t.Version < 9 { + n, err = pd.getArrayLength() + } else { + n, err = pd.getCompactArrayLength() + } + if err != nil { return err } else { - t.Partitions = make([]*PartitionMetadata, numPartitions) - for i := 0; i < numPartitions; i++ { + t.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { block := &PartitionMetadata{} if err := block.decode(pd, t.Version); err != nil { return err @@ -133,6 +201,20 @@ func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { } } + if t.Version >= 8 { + t.TopicAuthorizedOperations, err = pd.getInt32() + if err != nil { + return err + } + } + + if t.Version >= 9 { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + return nil } @@ -140,16 +222,33 @@ func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { t.Version = version pe.putInt16(int16(t.Err)) - if err := pe.putString(t.Name); err != nil { + if t.Version < 9 { + err = pe.putString(t.Name) + } else { + err = pe.putCompactString(t.Name) + } + if err != nil { return err } + if t.Version >= 10 { + err = pe.putRawBytes(t.Uuid[:]) + if err != nil { + return err + } + } + if t.Version >= 1 { pe.putBool(t.IsInternal) } - if err := pe.putArrayLength(len(t.Partitions)); err != nil { - return err + if t.Version < 9 { + err = pe.putArrayLength(len(t.Partitions)) + if err != nil { + return err + } + } else { + pe.putCompactArrayLength(len(t.Partitions)) } for _, block := range t.Partitions { if err := block.encode(pe, t.Version); err != nil { @@ -157,6 +256,14 @@ func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { } } + if t.Version >= 8 { + pe.putInt32(t.TopicAuthorizedOperations) + } + + if t.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -172,7 +279,8 @@ type MetadataResponse struct { // ControllerID contains the ID of the controller broker. ControllerID int32 // Topics contains each topic in the response. 
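Callers no longer hand-set MetadataRequest.Version; the NewMetadataRequest constructor added above walks the version ladder down from the configured cluster version. A short usage sketch, assuming the vendored package is importable; the topic name is a placeholder:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// V2_8_0_0 selects v10 (topic-ID aware); older cluster versions walk
	// down the same ladder the constructor encodes.
	req := sarama.NewMetadataRequest(sarama.V2_8_0_0, []string{"logs"})
	fmt.Println(req.Version) // 10

	req = sarama.NewMetadataRequest(sarama.V0_10_1_0, nil) // nil topics = all topics
	fmt.Println(req.Version) // 2
}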
- Topics []*TopicMetadata + Topics []*TopicMetadata + ClusterAuthorizedOperations int32 // Only valid for Version >= 8 } func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { @@ -183,12 +291,18 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } } - n, err := pd.getArrayLength() + var brokerArrayLen int + if r.Version < 9 { + brokerArrayLen, err = pd.getArrayLength() + } else { + brokerArrayLen, err = pd.getCompactArrayLength() + } if err != nil { return err } - r.Brokers = make([]*Broker, n) - for i := 0; i < n; i++ { + + r.Brokers = make([]*Broker, brokerArrayLen) + for i := 0; i < brokerArrayLen; i++ { r.Brokers[i] = new(Broker) err = r.Brokers[i].decode(pd, version) if err != nil { @@ -197,7 +311,12 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } if r.Version >= 2 { - if r.ClusterID, err = pd.getNullableString(); err != nil { + if r.Version < 9 { + r.ClusterID, err = pd.getNullableString() + } else { + r.ClusterID, err = pd.getCompactNullableString() + } + if err != nil { return err } } @@ -208,16 +327,36 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } } - if numTopics, err := pd.getArrayLength(); err != nil { - return err + var topicArrayLen int + if version < 9 { + topicArrayLen, err = pd.getArrayLength() } else { - r.Topics = make([]*TopicMetadata, numTopics) - for i := 0; i < numTopics; i++ { - block := &TopicMetadata{} - if err := block.decode(pd, r.Version); err != nil { - return err - } - r.Topics[i] = block + topicArrayLen, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, topicArrayLen) + for i := 0; i < topicArrayLen; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd, version) + if err != nil { + return err + } + } + + if r.Version >= 8 { + r.ClusterAuthorizedOperations, err = pd.getInt32() + if err != nil { + return err + } + } + + if r.Version >= 9 { + _, err := pd.getEmptyTaggedFieldArray() + if err != nil { + return err } } @@ -229,9 +368,15 @@ func (r *MetadataResponse) encode(pe packetEncoder) (err error) { pe.putInt32(r.ThrottleTimeMs) } - if err := pe.putArrayLength(len(r.Brokers)); err != nil { - return err + if r.Version < 9 { + err = pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + } else { + pe.putCompactArrayLength(len(r.Brokers)) } + for _, broker := range r.Brokers { err = broker.encode(pe, r.Version) if err != nil { @@ -240,8 +385,16 @@ func (r *MetadataResponse) encode(pe packetEncoder) (err error) { } if r.Version >= 2 { - if err := pe.putNullableString(r.ClusterID); err != nil { - return err + if r.Version < 9 { + err = pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } else { + err = pe.putNullableCompactString(r.ClusterID) + if err != nil { + return err + } } } @@ -249,7 +402,12 @@ func (r *MetadataResponse) encode(pe packetEncoder) (err error) { pe.putInt32(r.ControllerID) } - if err := pe.putArrayLength(len(r.Topics)); err != nil { + if r.Version < 9 { + err = pe.putArrayLength(len(r.Topics)) + } else { + pe.putCompactArrayLength(len(r.Topics)) + } + if err != nil { return err } for _, block := range r.Topics { @@ -258,6 +416,14 @@ func (r *MetadataResponse) encode(pe packetEncoder) (err error) { } } + if r.Version >= 8 { + pe.putInt32(r.ClusterAuthorizedOperations) + } + + if r.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -270,28 +436,48 @@ func (r *MetadataResponse) 
version() int16 { } func (r *MetadataResponse) headerVersion() int16 { - return 0 + if r.Version < 9 { + return 0 + } else { + return 1 + } +} + +func (r *MetadataResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 } func (r *MetadataResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_10_0_0 - case 2: - return V0_10_1_0 - case 3, 4: - return V0_11_0_0 - case 5: - return V1_0_0_0 - case 6: - return V2_0_0_0 + case 10: + return V2_8_0_0 + case 9: + return V2_4_0_0 + case 8: + return V2_3_0_0 case 7: return V2_1_0_0 + case 6: + return V2_0_0_0 + case 5: + return V1_0_0_0 + case 3, 4: + return V0_11_0_0 + case 2: + return V0_10_1_0 + case 1: + return V0_10_0_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_8_0_0 } } +func (r *MetadataResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + // testing API func (r *MetadataResponse) AddBroker(addr string, id int32) { @@ -336,7 +522,16 @@ func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID i foundPartition: pmatch.Leader = brokerID pmatch.Replicas = replicas + if pmatch.Replicas == nil { + pmatch.Replicas = []int32{} + } pmatch.Isr = isr + if pmatch.Isr == nil { + pmatch.Isr = []int32{} + } pmatch.OfflineReplicas = offline + if pmatch.OfflineReplicas == nil { + pmatch.OfflineReplicas = []int32{} + } pmatch.Err = err } diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go similarity index 97% rename from vendor/github.com/Shopify/sarama/metrics.go rename to vendor/github.com/IBM/sarama/metrics.go index 7b7705f2e3bf1..de8ad95c749eb 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/IBM/sarama/metrics.go @@ -32,7 +32,7 @@ func getMetricNameForBroker(name string, broker *Broker) string { func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. KAFKA-1902 and KAFKA-2337 - return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)) + return fmt.Sprintf(name+"-for-topic-%s", strings.ReplaceAll(topic, ".", "_")) } func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go similarity index 87% rename from vendor/github.com/Shopify/sarama/mockbroker.go rename to vendor/github.com/IBM/sarama/mockbroker.go index 628c3cb90c7f3..2c5e7caddec35 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/IBM/sarama/mockbroker.go @@ -10,6 +10,7 @@ import ( "reflect" "strconv" "sync" + "syscall" "time" "github.com/davecgh/go-spew/spew" @@ -98,6 +99,20 @@ func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { }) } +// SetHandlerFuncByMap defines mapping of Request types to RequestHandlerFunc. When a +// request is received by the broker, it looks up the request type in the map +// and invoke the found RequestHandlerFunc instance to generate an appropriate reply. 
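Alongside the version tables, most response types in this diff gain an unexported throttleTime() accessor that converts the raw millisecond field into a time.Duration, so the client can honor broker throttling behind a single interface. An illustrative sketch of the pattern; the types here are stand-ins for the vendored ones:

package main

import (
	"fmt"
	"time"
)

// throttleSupport is a stand-in for the interface the new accessors serve.
type throttleSupport interface {
	throttleTime() time.Duration
}

type metadataResponse struct{ ThrottleTimeMs int32 }

func (r metadataResponse) throttleTime() time.Duration {
	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
}

func main() {
	var resp throttleSupport = metadataResponse{ThrottleTimeMs: 250}
	// Callers can now back off on a typed duration instead of re-deriving
	// it from each response's raw int32 field.
	fmt.Println(resp.throttleTime()) // 250ms
}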
+func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) { + fnMap := make(map[string]requestHandlerFunc) + for k, v := range handlerMap { + fnMap[k] = v + } + b.setHandler(func(req *request) (res encoderWithHeader) { + reqTypeName := reflect.TypeOf(req.body).Elem().Name() + return fnMap[reqTypeName](req) + }) +} + // SetNotifier set a function that will get invoked whenever a request has been // processed successfully and will provide the number of bytes read and written func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { @@ -178,7 +193,9 @@ func (b *MockBroker) serverLoop() { i++ } wg.Wait() - Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) + } } func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { @@ -243,8 +260,10 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W for { buffer, err := b.readToBytes(conn) if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) - b.serverError(err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) + b.serverError(err) + } break } @@ -253,8 +272,10 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W req, br, err := decodeRequest(bytes.NewReader(buffer)) bytesRead = br if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) - b.serverError(err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + } break } @@ -280,7 +301,7 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W encodedRes, err := encode(res, nil) if err != nil { - b.serverError(err) + b.serverError(fmt.Errorf("failed to encode %T - %w", res, err)) break } if len(encodedRes) == 0 { @@ -358,21 +379,25 @@ func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) } } -func (b *MockBroker) serverError(err error) { - isConnectionClosedError := false +func isConnectionClosedError(err error) bool { + var result bool opError := &net.OpError{} if errors.As(err, &opError) { - isConnectionClosedError = true + result = true } else if errors.Is(err, io.EOF) { - isConnectionClosedError = true + result = true } else if err.Error() == "use of closed network connection" { - isConnectionClosedError = true + result = true } - if isConnectionClosedError { + return result +} + +func (b *MockBroker) serverError(err error) { + b.t.Helper() + if isConnectionClosedError(err) { return } - b.t.Errorf(err.Error()) } @@ -386,10 +411,29 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. 
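The mock broker now supports routing by request type either to canned MockResponse builders (SetHandlerByMap) or, via the new SetHandlerFuncByMap, to plain functions. A usage sketch in a test; the topic name is a placeholder:

package main

import (
	"testing"

	"github.com/IBM/sarama"
)

func TestMetadataFromMockBroker(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	defer mb.Close()

	// Route each request type to a canned builder; unmapped types are ignored.
	mb.SetHandlerByMap(map[string]sarama.MockResponse{
		"ApiVersionsRequest": sarama.NewMockApiVersionsResponse(t),
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mb.Addr(), mb.BrokerID()).
			SetLeader("logs", 0, mb.BrokerID()),
	})

	client, err := sarama.NewClient([]string{mb.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}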
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { - listener, err := net.Listen("tcp", addr) + var ( + listener net.Listener + err error + ) + + // retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown) + for i := 0; i < 20; i++ { + listener, err = net.Listen("tcp", addr) + if err != nil { + if errors.Is(err, syscall.EADDRINUSE) { + Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr) + time.Sleep(time.Millisecond * 100) + continue + } + t.Fatal(err) + } + break + } + if err != nil { t.Fatal(err) } + return NewMockBrokerListener(t, brokerID, listener) } diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockkerberos.go rename to vendor/github.com/IBM/sarama/mockkerberos.go diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go similarity index 90% rename from vendor/github.com/Shopify/sarama/mockresponses.go rename to vendor/github.com/IBM/sarama/mockresponses.go index 15b4367f998b8..d09415b49a045 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/IBM/sarama/mockresponses.go @@ -13,6 +13,7 @@ type TestReporter interface { Errorf(string, ...interface{}) Fatal(...interface{}) Fatalf(string, ...interface{}) + Helper() } // MockResponse is a response builder interface it defines one method that @@ -82,9 +83,9 @@ func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*ListGroupsRequest) - _ = request response := &ListGroupsResponse{ - Groups: m.groups, + Version: request.Version, + Groups: m.groups, } return response } @@ -114,7 +115,7 @@ func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, descrip func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*DescribeGroupsRequest) - response := &DescribeGroupsResponse{} + response := &DescribeGroupsResponse{Version: request.version()} for _, requestedGroup := range request.Groups { if group, ok := m.groups[requestedGroup]; ok { response.Groups = append(response.Groups, group) @@ -134,6 +135,7 @@ func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHe // MockMetadataResponse is a `MetadataResponse` builder. 
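The retry loop added to NewMockBrokerAddr above matters for tests that tear a broker down and immediately bring a replacement up on the same address, where the old listener may not have been released yet. Sketch:

package main

import (
	"testing"

	"github.com/IBM/sarama"
)

func TestBrokerRestartSameAddr(t *testing.T) {
	first := sarama.NewMockBroker(t, 1)
	addr := first.Addr()
	first.Close()

	// Rebinding the exact address may briefly report "address already in
	// use"; the constructor above now retries for ~2s instead of failing
	// the test outright.
	second := sarama.NewMockBrokerAddr(t, 1, addr)
	defer second.Close()
}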
type MockMetadataResponse struct { controllerID int32 + errors map[string]KError leaders map[string]map[int32]int32 brokers map[string]int32 t TestReporter @@ -141,12 +143,18 @@ type MockMetadataResponse struct { func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { return &MockMetadataResponse{ + errors: make(map[string]KError), leaders: make(map[string]map[int32]int32), brokers: make(map[string]int32), t: t, } } +func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse { + mmr.errors[topic] = kerror + return mmr +} + func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { partitions := mmr.leaders[topic] if partitions == nil { @@ -190,10 +198,22 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } + for topic, err := range mmr.errors { + metadataResponse.AddTopic(topic, err) + } return metadataResponse } for _, topic := range metadataRequest.Topics { - for partition, brokerID := range mmr.leaders[topic] { + leaders, ok := mmr.leaders[topic] + if !ok { + if err, ok := mmr.errors[topic]; ok { + metadataResponse.AddTopic(topic, err) + } else { + metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition) + } + continue + } + for partition, brokerID := range leaders { metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } @@ -233,7 +253,7 @@ func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetResponse := &OffsetResponse{Version: offsetRequest.Version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { - offset := mor.getOffset(topic, partition, block.time) + offset := mor.getOffset(topic, partition, block.timestamp) offsetResponse.AddTopicPartition(topic, partition, offset) } } @@ -410,7 +430,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup - res := &ConsumerMetadataResponse{} + res := &ConsumerMetadataResponse{Version: req.version()} v := mr.coordinators[group] switch v := v.(type) { case *MockBroker: @@ -458,8 +478,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) - res := &FindCoordinatorResponse{} - res.Version = req.Version + res := &FindCoordinatorResponse{Version: req.version()} var v interface{} switch req.CoordinatorType { case CoordinatorGroup: @@ -507,7 +526,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup - res := &OffsetCommitResponse{} + res := &OffsetCommitResponse{Version: req.version()} for topic, partitions := range req.blocks { for partition := range partitions { res.AddError(topic, partition, mr.getError(group, topic, partition)) @@ -564,7 +583,10 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res 
:= &ProduceResponse{ - Version: mr.version, + Version: req.version(), + } + if mr.version > 0 { + res.Version = mr.version } for topic, partitions := range req.records { for partition := range partitions { @@ -667,7 +689,8 @@ func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHea } type MockDeleteTopicsResponse struct { - t TestReporter + t TestReporter + error KError } func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { @@ -676,16 +699,21 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) - res := &DeleteTopicsResponse{} + res := &DeleteTopicsResponse{Version: req.version()} res.TopicErrorCodes = make(map[string]KError) for _, topic := range req.Topics { - res.TopicErrorCodes[topic] = ErrNoError + res.TopicErrorCodes[topic] = mr.error } res.Version = req.Version return res } +func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse { + mr.error = kerror + return mr +} + type MockCreatePartitionsResponse struct { t TestReporter } @@ -696,7 +724,7 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) - res := &CreatePartitionsResponse{} + res := &CreatePartitionsResponse{Version: req.version()} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) for topic := range req.TopicPartitions { @@ -724,7 +752,7 @@ func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartit func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterPartitionReassignmentsRequest) _ = req - res := &AlterPartitionReassignmentsResponse{} + res := &AlterPartitionReassignmentsResponse{Version: req.version()} return res } @@ -739,7 +767,7 @@ func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitio func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ListPartitionReassignmentsRequest) _ = req - res := &ListPartitionReassignmentsResponse{} + res := &ListPartitionReassignmentsResponse{Version: req.version()} for topic, partitions := range req.blocks { for _, partition := range partitions { @@ -760,7 +788,7 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) - res := &DeleteRecordsResponse{} + res := &DeleteRecordsResponse{Version: req.version()} res.Topics = make(map[string]*DeleteRecordsResponseTopic) for topic, deleteRecordRequestTopic := range req.Topics { @@ -906,7 +934,7 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) - res := &AlterConfigsResponse{} + res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -928,7 +956,7 @@ func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsR func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) - res 
:= &AlterConfigsResponse{} + res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -951,7 +979,7 @@ func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlte func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) - res := &IncrementalAlterConfigsResponse{} + res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -973,7 +1001,7 @@ func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIn func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) - res := &IncrementalAlterConfigsResponse{} + res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -996,7 +1024,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) - res := &CreateAclsResponse{} + res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) @@ -1014,7 +1042,7 @@ func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseE func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) - res := &CreateAclsResponse{} + res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest}) @@ -1032,7 +1060,7 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) - res := &DescribeAclsResponse{} + res := &DescribeAclsResponse{Version: req.version()} res.Err = ErrNoError acl := &ResourceAcls{} if req.ResourceName != nil { @@ -1075,11 +1103,12 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*SaslAuthenticateRequest) - res := &SaslAuthenticateResponse{} - res.Version = req.Version - res.Err = msar.kerror - res.SaslAuthBytes = msar.saslAuthBytes - res.SessionLifetimeMs = msar.sessionLifetimeMs + res := &SaslAuthenticateResponse{ + Version: req.version(), + Err: msar.kerror, + SaslAuthBytes: msar.saslAuthBytes, + SessionLifetimeMs: msar.sessionLifetimeMs, + } return res } @@ -1113,7 +1142,8 @@ func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { } func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { - res := &SaslHandshakeResponse{} + req := reqBody.(*SaslHandshakeRequest) + res := &SaslHandshakeResponse{Version: req.version()} res.Err = mshr.kerror res.EnabledMechanisms = mshr.enabledMechanisms return res @@ -1135,7 +1165,7 @@ func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := 
reqBody.(*DeleteAclsRequest) - res := &DeleteAclsResponse{} + res := &DeleteAclsResponse{Version: req.version()} for range req.Filters { response := &FilterResponse{Err: ErrNoError} @@ -1160,7 +1190,9 @@ func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDelete } func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteGroupsRequest) resp := &DeleteGroupsResponse{ + Version: req.version(), GroupErrorCodes: map[string]KError{}, } for _, group := range m.deletedGroups { @@ -1189,7 +1221,9 @@ func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic stri } func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteOffsetsRequest) resp := &DeleteOffsetsResponse{ + Version: req.version(), ErrorCode: m.errorCode, Errors: map[string]map[int32]KError{ m.topic: {m.partition: m.errorPartition}, @@ -1282,8 +1316,10 @@ func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { } func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*LeaveGroupRequest) resp := &LeaveGroupResponse{ - Err: m.Err, + Version: req.version(), + Err: m.Err, } return resp } @@ -1305,7 +1341,9 @@ func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { } func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*SyncGroupRequest) resp := &SyncGroupResponse{ + Version: req.version(), Err: m.Err, MemberAssignment: m.MemberAssignment, } @@ -1337,7 +1375,10 @@ func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { } func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { - resp := &HeartbeatResponse{} + req := reqBody.(*HeartbeatRequest) + resp := &HeartbeatResponse{ + Version: req.version(), + } return resp } @@ -1382,7 +1423,9 @@ func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartiti } func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeLogDirsRequest) resp := &DescribeLogDirsResponse{ + Version: req.version(), LogDirs: m.logDirs, } return resp @@ -1424,3 +1467,43 @@ func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeade } return res } + +// MockInitProducerIDResponse is an `InitProducerIDResponse` builder. 
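The common thread through these mockresponses.go changes: every For handler now echoes the request's version into the response it fabricates, where previously the mocks answered with version 0 regardless of what was asked, so version-dependent encoders were never exercised. A stand-in sketch of the pattern (sarama's real plumbing types are unexported):

package main

import "fmt"

// versioned stands in for sarama's unexported request plumbing.
type versioned interface{ version() int16 }

type heartbeatRequest struct{ Version int16 }

func (r *heartbeatRequest) version() int16 { return r.Version }

type heartbeatResponse struct{ Version int16 }

// mockHeartbeatFor fabricates a response; copying req.version() across is
// the whole fix, so the mock encodes the same wire format the client asked for.
func mockHeartbeatFor(reqBody versioned) *heartbeatResponse {
	return &heartbeatResponse{Version: reqBody.version()}
}

func main() {
	resp := mockHeartbeatFor(&heartbeatRequest{Version: 3})
	fmt.Println(resp.Version) // 3
}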
+type MockInitProducerIDResponse struct { + producerID int64 + producerEpoch int16 + err KError + t TestReporter +} + +func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse { + return &MockInitProducerIDResponse{ + t: t, + } +} + +func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse { + m.producerID = int64(id) + return m +} + +func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse { + m.producerEpoch = int16(epoch) + return m +} + +func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse { + m.err = err + return m +} + +func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*InitProducerIDRequest) + res := &InitProducerIDResponse{ + Version: req.Version, + Err: m.err, + ProducerID: m.producerID, + ProducerEpoch: m.producerEpoch, + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/offset_commit_request.go rename to vendor/github.com/IBM/sarama/offset_commit_request.go index 5dd88220d97ff..45d1977d4177e 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/offset_commit_request.go @@ -201,26 +201,34 @@ func (r *OffsetCommitRequest) headerVersion() int16 { return 1 } +func (r *OffsetCommitRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_9_0_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5, 6: - return V2_1_0_0 case 7: return V2_3_0_0 + case 5, 6: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_9_0_0 + case 0, 1: + return V0_8_2_0 default: - return MinVersion + return V2_4_0_0 } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) +} + +func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go similarity index 87% rename from vendor/github.com/Shopify/sarama/offset_commit_response.go rename to vendor/github.com/IBM/sarama/offset_commit_response.go index 4bed269aa5be3..523508fa48536 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/IBM/sarama/offset_commit_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type OffsetCommitResponse struct { Version int16 ThrottleTimeMs int32 @@ -98,19 +100,29 @@ func (r *OffsetCommitResponse) headerVersion() int16 { return 0 } +func (r *OffsetCommitResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_9_0_0 - case 3: - return V0_11_0_0 + case 7: 
+ return V2_3_0_0 + case 5, 6: + return V2_1_0_0 case 4: return V2_0_0_0 - case 5, 6, 7: - return V2_3_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_9_0_0 + case 0, 1: + return V0_8_2_0 default: - return MinVersion + return V2_4_0_0 } } + +func (r *OffsetCommitResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go similarity index 73% rename from vendor/github.com/Shopify/sarama/offset_fetch_request.go rename to vendor/github.com/IBM/sarama/offset_fetch_request.go index 7e147eb60c192..0c9b8405bda02 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/IBM/sarama/offset_fetch_request.go @@ -7,6 +7,43 @@ type OffsetFetchRequest struct { partitions map[string][]int32 } +func NewOffsetFetchRequest( + version KafkaVersion, + group string, + partitions map[string][]int32, +) *OffsetFetchRequest { + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: partitions, + } + if version.IsAtLeast(V2_5_0_0) { + // Version 7 is adding the require stable flag. + request.Version = 7 + } else if version.IsAtLeast(V2_4_0_0) { + // Version 6 is the first flexible version. + request.Version = 6 + } else if version.IsAtLeast(V2_1_0_0) { + // Version 3, 4, and 5 are the same as version 2. + request.Version = 5 + } else if version.IsAtLeast(V2_0_0_0) { + request.Version = 4 + } else if version.IsAtLeast(V0_11_0_0) { + request.Version = 3 + } else if version.IsAtLeast(V0_10_2_0) { + // Starting in version 2, the request can contain a null topics array to indicate that offsets + // for all topics should be fetched. It also returns a top level error code + // for group or coordinator level errors. + request.Version = 2 + } else if version.IsAtLeast(V0_8_2_0) { + // In version 0, the request read offsets from ZK. + // + // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic. 
+ request.Version = 1 + } + + return request +} + func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} @@ -171,24 +208,30 @@ func (r *OffsetFetchRequest) headerVersion() int16 { return 1 } +func (r *OffsetFetchRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_10_2_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5: - return V2_1_0_0 - case 6: - return V2_4_0_0 case 7: return V2_5_0_0 + case 6: + return V2_4_0_0 + case 5: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_2_0 + case 1: + return V0_8_2_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_5_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/offset_fetch_response.go rename to vendor/github.com/IBM/sarama/offset_fetch_response.go index 19449220f2853..7ce7927d8d440 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/IBM/sarama/offset_fetch_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type OffsetFetchResponseBlock struct { Offset int64 LeaderEpoch int32 @@ -20,6 +22,8 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err if err != nil { return err } + } else { + b.LeaderEpoch = -1 } if isFlexible { @@ -234,27 +238,37 @@ func (r *OffsetFetchResponse) headerVersion() int16 { return 0 } +func (r *OffsetFetchResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_10_2_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5: - return V2_1_0_0 - case 6: - return V2_4_0_0 case 7: return V2_5_0_0 + case 6: + return V2_4_0_0 + case 5: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_2_0 + case 1: + return V0_8_2_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_5_0_0 } } +func (r *OffsetFetchResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { if r.Blocks == nil { return nil diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go similarity index 84% rename from vendor/github.com/Shopify/sarama/offset_manager.go rename to vendor/github.com/IBM/sarama/offset_manager.go index 1ea15ff93933a..2948651272b02 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -153,11 +153,8 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri return om.fetchInitialOffset(topic, partition, retries-1) } - req := new(OffsetFetchRequest) - req.Version = 1 - req.ConsumerGroup = om.group - req.AddPartition(topic, partition) - + partitions := map[string][]int32{topic: {partition}} + req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions) resp, err := broker.FetchOffset(req) if err != nil { if retries <= 0 { @@ 
-254,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. + broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -273,27 +283,68 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if om.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: om.group, - ConsumerID: om.memberID, - ConsumerGroupGeneration: om.generation, - } - } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: om.group, - ConsumerID: om.memberID, - ConsumerGroupGeneration: om.generation, + r := &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + // Version 1 adds timestamp and group membership information, as well as the commit timestamp. + // + // Version 2 adds retention time. It removes the commit timestamp added in version 1. + if om.conf.Version.IsAtLeast(V0_9_0_0) { + r.Version = 2 + } + // Version 3 and 4 are the same as version 2. + if om.conf.Version.IsAtLeast(V0_11_0_0) { + r.Version = 3 + } + if om.conf.Version.IsAtLeast(V2_0_0_0) { + r.Version = 4 + } + // Version 5 removes the retention time, which is now controlled only by a broker configuration. + // + // Version 6 adds the leader epoch for fencing. + if om.conf.Version.IsAtLeast(V2_1_0_0) { + r.Version = 6 + } + // version 7 adds a new field called groupInstanceId to indicate member identity across restarts. + if om.conf.Version.IsAtLeast(V2_3_0_0) { + r.Version = 7 + r.GroupInstanceId = om.groupInstanceId + } + + // commit timestamp was only briefly supported in V1 where we set it to + // ReceiveTime (-1) to tell the broker to set it to the time when the commit + // request was received + var commitTimestamp int64 + if r.Version == 1 { + commitTimestamp = ReceiveTime + } + + // request controlled retention was only supported from V2-V4 (it became + // broker-only after that) so if the user has set the config options then + // flow those through as retention time on the commit request. 
+ if r.Version >= 2 && r.Version < 5 { + // Map Sarama's default of 0 to Kafka's default of -1 + r.RetentionTime = -1 + if om.conf.Consumer.Offsets.Retention > 0 { + r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond) } } @@ -304,17 +355,12 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) + r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata) } pom.lock.Unlock() } } - if om.groupInstanceId != nil { - r.Version = 7 - r.GroupInstanceId = om.groupInstanceId - } - if len(r.blocks) > 0 { return r } @@ -359,13 +405,13 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) - // TODO close the whole consumer for instacne fenced.... + // TODO close the whole consumer for instance fenced.... om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go similarity index 76% rename from vendor/github.com/Shopify/sarama/offset_request.go rename to vendor/github.com/IBM/sarama/offset_request.go index 4c9ce4df552ca..13de0a89f145c 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/IBM/sarama/offset_request.go @@ -1,28 +1,46 @@ package sarama type offsetRequestBlock struct { - time int64 - maxOffsets int32 // Only used in version 0 + // currentLeaderEpoch contains the current leader epoch (used in version 4+). + currentLeaderEpoch int32 + // timestamp contains the current timestamp. + timestamp int64 + // maxNumOffsets contains the maximum number of offsets to report. 
+ maxNumOffsets int32 // Only used in version 0 } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(b.time) + if version >= 4 { + pe.putInt32(b.currentLeaderEpoch) + } + + pe.putInt64(b.timestamp) + if version == 0 { - pe.putInt32(b.maxOffsets) + pe.putInt32(b.maxNumOffsets) } return nil } func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if b.time, err = pd.getInt64(); err != nil { + b.currentLeaderEpoch = -1 + if version >= 4 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + + if b.timestamp, err = pd.getInt64(); err != nil { return err } + if version == 0 { - if b.maxOffsets, err = pd.getInt32(); err != nil { + if b.maxNumOffsets, err = pd.getInt32(); err != nil { return err } } + return nil } @@ -137,14 +155,24 @@ func (r *OffsetRequest) headerVersion() int16 { return 1 } +func (r *OffsetRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_10_1_0 + case 4: + return V2_1_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 + case 1: + return V0_10_1_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_0_0_0 } } @@ -160,7 +188,7 @@ func (r *OffsetRequest) ReplicaID() int32 { return -1 } -func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetRequestBlock) } @@ -170,9 +198,10 @@ func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, ma } tmp := new(offsetRequestBlock) - tmp.time = time + tmp.currentLeaderEpoch = -1 + tmp.timestamp = timestamp if r.Version == 0 { - tmp.maxOffsets = maxOffsets + tmp.maxNumOffsets = maxOffsets } r.blocks[topic][partitionID] = tmp diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go similarity index 73% rename from vendor/github.com/Shopify/sarama/offset_response.go rename to vendor/github.com/IBM/sarama/offset_response.go index ffe84664c59e0..6c62e07913bae 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/IBM/sarama/offset_response.go @@ -1,10 +1,17 @@ package sarama +import "time" + type OffsetResponseBlock struct { - Err KError - Offsets []int64 // Version 0 - Offset int64 // Version 1 - Timestamp int64 // Version 1 + Err KError + // Offsets contains the result offsets (for V0/V1 compatibility) + Offsets []int64 // Version 0 + // Timestamp contains the timestamp associated with the returned offset. + Timestamp int64 // Version 1 + // Offset contains the returned offset. + Offset int64 // Version 1 + // LeaderEpoch contains the current leader epoch of the partition. 
+ LeaderEpoch int32 } func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -16,22 +23,29 @@ func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error if version == 0 { b.Offsets, err = pd.getInt64Array() - return err } - b.Timestamp, err = pd.getInt64() - if err != nil { - return err - } + if version >= 1 { + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } - b.Offset, err = pd.getInt64() - if err != nil { - return err + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} } - // For backwards compatibility put the offset in the offsets array too - b.Offsets = []int64{b.Offset} + if version >= 4 { + if b.LeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } return nil } @@ -43,8 +57,14 @@ func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error return pe.putInt64Array(b.Offsets) } - pe.putInt64(b.Timestamp) - pe.putInt64(b.Offset) + if version >= 1 { + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + } + + if version >= 4 { + pe.putInt32(b.LeaderEpoch) + } return nil } @@ -165,17 +185,31 @@ func (r *OffsetResponse) headerVersion() int16 { return 0 } +func (r *OffsetResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_10_1_0 + case 4: + return V2_1_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 + case 1: + return V0_10_1_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_0_0_0 } } +func (r *OffsetResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + // testing API func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go similarity index 98% rename from vendor/github.com/Shopify/sarama/packet_decoder.go rename to vendor/github.com/IBM/sarama/packet_decoder.go index b8cae5350a932..526e0f42fe852 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/IBM/sarama/packet_decoder.go @@ -55,7 +55,7 @@ type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. 
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/packet_encoder.go rename to vendor/github.com/IBM/sarama/packet_encoder.go diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go similarity index 86% rename from vendor/github.com/Shopify/sarama/partitioner.go rename to vendor/github.com/IBM/sarama/partitioner.go index 57377760a7cbb..50a345a3eb29e 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/IBM/sarama/partitioner.go @@ -2,6 +2,7 @@ package sarama import ( "hash" + "hash/crc32" "hash/fnv" "math/rand" "time" @@ -53,6 +54,15 @@ func WithAbsFirst() HashPartitionerOption { } } +// WithHashUnsigned means the partitioner treats the hashed value as unsigned when +// partitioning. This is intended to be combined with the crc32 hash algorithm to +// be compatible with librdkafka's implementation +func WithHashUnsigned() HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.hashUnsigned = true + } +} + // WithCustomHashFunction lets you specify what hash function to use for the partitioning func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { return func(hp *hashPartitioner) { @@ -126,6 +136,7 @@ type hashPartitioner struct { random Partitioner hasher hash.Hash32 referenceAbs bool + hashUnsigned bool } // NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. @@ -137,6 +148,7 @@ func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor p.random = NewRandomPartitioner(topic) p.hasher = hasher() p.referenceAbs = false + p.hashUnsigned = false return p } } @@ -148,6 +160,7 @@ func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstruct p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false + p.hashUnsigned = false for _, option := range options { option(p) } @@ -164,6 +177,7 @@ func NewHashPartitioner(topic string) Partitioner { p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false + p.hashUnsigned = false return p } @@ -176,6 +190,19 @@ func NewReferenceHashPartitioner(topic string) Partitioner { p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = true + p.hashUnsigned = false + return p +} + +// NewConsistentCRCHashPartitioner is like NewHashPartitioner except that it uses the *unsigned* crc32 hash +// of the encoded bytes of the message key modulo the number of partitions. This is compatible with +// librdkafka's `consistent_random` partitioner +func NewConsistentCRCHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = crc32.NewIEEE() + p.referenceAbs = false + p.hashUnsigned = true + return p } @@ -199,6 +226,10 @@ func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int3 // but not past Sarama versions if p.referenceAbs { partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else if p.hashUnsigned { + // librdkafka treats the hashed value as unsigned.
If `hashUnsigned` is set we are compatible + // with librdkafka's `consistent` partitioning but not past Sarama versions + partition = int32(p.hasher.Sum32() % uint32(numPartitions)) } else { partition = int32(p.hasher.Sum32()) % numPartitions if partition < 0 { diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/prep_encoder.go rename to vendor/github.com/IBM/sarama/prep_encoder.go diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/produce_request.go rename to vendor/github.com/IBM/sarama/produce_request.go index 0034651e2542b..cbe58dd827245 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/IBM/sarama/produce_request.go @@ -29,7 +29,8 @@ type ProduceRequest struct { } func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { + topicCompressionRatioMetric metrics.Histogram, +) int64 { var topicRecordCount int64 for _, messageBlock := range msgSet.Messages { // Is this a fake "message" wrapping real messages? @@ -53,7 +54,8 @@ func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Hist } func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { + topicCompressionRatioMetric metrics.Histogram, +) int64 { if recordBatch.compressedRecords != nil { compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) compressionRatioMetric.Update(compressionRatio) @@ -210,18 +212,28 @@ func (r *ProduceRequest) headerVersion() int16 { return 1 } +func (r *ProduceRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 case 7: return V2_1_0_0 + case 6: + return V2_0_0_0 + case 4, 5: + return V1_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_1_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/produce_response.go rename to vendor/github.com/IBM/sarama/produce_response.go index edf978790c9fc..de53e06a0c6bf 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/IBM/sarama/produce_response.go @@ -175,8 +175,33 @@ func (r *ProduceResponse) headerVersion() int16 { return 0 } +func (r *ProduceResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *ProduceResponse) requiredVersion() KafkaVersion { - return MinVersion + switch r.Version { + case 7: + return V2_1_0_0 + case 6: + return V2_0_0_0 + case 4, 5: + return V1_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 + default: + return V2_1_0_0 + } +} + +func (r *ProduceResponse) throttleTime() time.Duration { + return r.ThrottleTime } func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { diff --git 
a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go similarity index 97% rename from vendor/github.com/Shopify/sarama/produce_set.go rename to vendor/github.com/IBM/sarama/produce_set.go index 8d6980479ebeb..004fc649039ec 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/IBM/sarama/produce_set.go @@ -141,8 +141,13 @@ func (ps *produceSet) buildRequest() *ProduceRequest { req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID } } - - if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } + if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) { + req.Version = 6 + } + if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { req.Version = 7 } diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go similarity index 100% rename from vendor/github.com/Shopify/sarama/quota_types.go rename to vendor/github.com/IBM/sarama/quota_types.go diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_decoder.go rename to vendor/github.com/IBM/sarama/real_decoder.go diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_encoder.go rename to vendor/github.com/IBM/sarama/real_encoder.go diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record.go rename to vendor/github.com/IBM/sarama/record.go diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go similarity index 95% rename from vendor/github.com/Shopify/sarama/record_batch.go rename to vendor/github.com/IBM/sarama/record_batch.go index d382ca488721c..c422c5c2f238e 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/IBM/sarama/record_batch.go @@ -20,12 +20,12 @@ func (e recordsArray) encode(pe packetEncoder) error { } func (e recordsArray) decode(pd packetDecoder) error { + records := make([]Record, len(e)) for i := range e { - rec := &Record{} - if err := rec.decode(pd); err != nil { + if err := records[i].decode(pd); err != nil { return err } - e[i] = rec + e[i] = &records[i] } return nil } @@ -58,7 +58,7 @@ func (b *RecordBatch) LastOffset() int64 { func (b *RecordBatch) encode(pe packetEncoder) error { if b.Version != 2 { - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)} } pe.putInt64(b.FirstOffset) pe.push(&lengthField{}) diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/IBM/sarama/records.go similarity index 100% rename from vendor/github.com/Shopify/sarama/records.go rename to vendor/github.com/IBM/sarama/records.go diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/IBM/sarama/request.go similarity index 52% rename from vendor/github.com/Shopify/sarama/request.go rename to vendor/github.com/IBM/sarama/request.go index 1e3923de73c7f..e8e74ca34a60f 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/IBM/sarama/request.go @@ -12,6 +12,7 @@ type protocolBody 
interface { key() int16 version() int16 headerVersion() int16 + isValidVersion() bool requiredVersion() KafkaVersion } @@ -119,85 +120,114 @@ func decodeRequest(r io.Reader) (*request, int, error) { func allocateBody(key, version int16) protocolBody { switch key { case 0: - return &ProduceRequest{} + return &ProduceRequest{Version: version} case 1: return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: return &MetadataRequest{Version: version} + // 4: LeaderAndIsrRequest + // 5: StopReplicaRequest + // 6: UpdateMetadataRequest + // 7: ControlledShutdownRequest case 8: return &OffsetCommitRequest{Version: version} case 9: return &OffsetFetchRequest{Version: version} case 10: - return &FindCoordinatorRequest{} + return &FindCoordinatorRequest{Version: version} case 11: - return &JoinGroupRequest{} + return &JoinGroupRequest{Version: version} case 12: - return &HeartbeatRequest{} + return &HeartbeatRequest{Version: version} case 13: - return &LeaveGroupRequest{} + return &LeaveGroupRequest{Version: version} case 14: - return &SyncGroupRequest{} + return &SyncGroupRequest{Version: version} case 15: - return &DescribeGroupsRequest{} + return &DescribeGroupsRequest{Version: version} case 16: - return &ListGroupsRequest{} + return &ListGroupsRequest{Version: version} case 17: - return &SaslHandshakeRequest{} + return &SaslHandshakeRequest{Version: version} case 18: return &ApiVersionsRequest{Version: version} case 19: - return &CreateTopicsRequest{} + return &CreateTopicsRequest{Version: version} case 20: - return &DeleteTopicsRequest{} + return &DeleteTopicsRequest{Version: version} case 21: - return &DeleteRecordsRequest{} + return &DeleteRecordsRequest{Version: version} case 22: return &InitProducerIDRequest{Version: version} + // 23: OffsetForLeaderEpochRequest case 24: - return &AddPartitionsToTxnRequest{} + return &AddPartitionsToTxnRequest{Version: version} case 25: - return &AddOffsetsToTxnRequest{} + return &AddOffsetsToTxnRequest{Version: version} case 26: - return &EndTxnRequest{} + return &EndTxnRequest{Version: version} + // 27: WriteTxnMarkersRequest case 28: - return &TxnOffsetCommitRequest{} + return &TxnOffsetCommitRequest{Version: version} case 29: - return &DescribeAclsRequest{} + return &DescribeAclsRequest{Version: int(version)} case 30: - return &CreateAclsRequest{} + return &CreateAclsRequest{Version: version} case 31: - return &DeleteAclsRequest{} + return &DeleteAclsRequest{Version: int(version)} case 32: - return &DescribeConfigsRequest{} + return &DescribeConfigsRequest{Version: version} case 33: - return &AlterConfigsRequest{} + return &AlterConfigsRequest{Version: version} + // 34: AlterReplicaLogDirsRequest case 35: - return &DescribeLogDirsRequest{} + return &DescribeLogDirsRequest{Version: version} case 36: - return &SaslAuthenticateRequest{} + return &SaslAuthenticateRequest{Version: version} case 37: - return &CreatePartitionsRequest{} + return &CreatePartitionsRequest{Version: version} + // 38: CreateDelegationTokenRequest + // 39: RenewDelegationTokenRequest + // 40: ExpireDelegationTokenRequest + // 41: DescribeDelegationTokenRequest case 42: - return &DeleteGroupsRequest{} + return &DeleteGroupsRequest{Version: version} + // 43: ElectLeadersRequest case 44: - return &IncrementalAlterConfigsRequest{} + return &IncrementalAlterConfigsRequest{Version: version} case 45: - return &AlterPartitionReassignmentsRequest{} + return &AlterPartitionReassignmentsRequest{Version: version} case 46: - return 
&ListPartitionReassignmentsRequest{} + return &ListPartitionReassignmentsRequest{Version: version} case 47: - return &DeleteOffsetsRequest{} + return &DeleteOffsetsRequest{Version: version} case 48: - return &DescribeClientQuotasRequest{} + return &DescribeClientQuotasRequest{Version: version} case 49: - return &AlterClientQuotasRequest{} + return &AlterClientQuotasRequest{Version: version} case 50: - return &DescribeUserScramCredentialsRequest{} + return &DescribeUserScramCredentialsRequest{Version: version} case 51: - return &AlterUserScramCredentialsRequest{} + return &AlterUserScramCredentialsRequest{Version: version} + // 52: VoteRequest + // 53: BeginQuorumEpochRequest + // 54: EndQuorumEpochRequest + // 55: DescribeQuorumRequest + // 56: AlterPartitionRequest + // 57: UpdateFeaturesRequest + // 58: EnvelopeRequest + // 59: FetchSnapshotRequest + // 60: DescribeClusterRequest + // 61: DescribeProducersRequest + // 62: BrokerRegistrationRequest + // 63: BrokerHeartbeatRequest + // 64: UnregisterBrokerRequest + // 65: DescribeTransactionsRequest + // 66: ListTransactionsRequest + // 67: AllocateProducerIdsRequest + // 68: ConsumerGroupHeartbeatRequest } return nil } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go similarity index 100% rename from vendor/github.com/Shopify/sarama/response_header.go rename to vendor/github.com/IBM/sarama/response_header.go diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go similarity index 99% rename from vendor/github.com/Shopify/sarama/sarama.go rename to vendor/github.com/IBM/sarama/sarama.go index a42bc075a1499..4d5f60a6664e0 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/IBM/sarama/sarama.go @@ -91,7 +91,7 @@ import ( var ( // Logger is the instance of a StdLogger interface that Sarama writes connection - // management events to. By default it is set to discard all log messages via ioutil.Discard, + // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. 
Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go similarity index 89% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_request.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_request.go index 5bb0988ea5f79..3a562a53b8f73 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go +++ b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go @@ -31,6 +31,10 @@ func (r *SaslAuthenticateRequest) headerVersion() int16 { return 1 } +func (r *SaslAuthenticateRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_response.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_response.go index 37c8e45dae1ab..ae52cde1c5e83 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go +++ b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go @@ -59,6 +59,10 @@ func (r *SaslAuthenticateResponse) headerVersion() int16 { return 0 } +func (r *SaslAuthenticateResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go similarity index 78% rename from vendor/github.com/Shopify/sarama/sasl_handshake_request.go rename to vendor/github.com/IBM/sarama/sasl_handshake_request.go index 74dc3072f4883..410a5b0eaa6db 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/IBM/sarama/sasl_handshake_request.go @@ -33,6 +33,15 @@ func (r *SaslHandshakeRequest) headerVersion() int16 { return 1 } +func (r *SaslHandshakeRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 1: + return V1_0_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go similarity index 77% rename from vendor/github.com/Shopify/sarama/sasl_handshake_response.go rename to vendor/github.com/IBM/sarama/sasl_handshake_response.go index 69dfc3178ec29..502732cbd37c6 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ b/vendor/github.com/IBM/sarama/sasl_handshake_response.go @@ -1,6 +1,7 @@ package sarama type SaslHandshakeResponse struct { + Version int16 Err KError EnabledMechanisms []string } @@ -30,13 +31,22 @@ func (r *SaslHandshakeResponse) key() int16 { } func (r *SaslHandshakeResponse) version() int16 { - return 0 + return r.Version } func (r *SaslHandshakeResponse) headerVersion() int16 { return 0 } +func (r *SaslHandshakeResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 1: + return V1_0_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go 
b/vendor/github.com/IBM/sarama/scram_formatter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/scram_formatter.go rename to vendor/github.com/IBM/sarama/scram_formatter.go diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/sync_group_request.go rename to vendor/github.com/IBM/sarama/sync_group_request.go index 33ed3baccbf32..95efc285801be 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/IBM/sarama/sync_group_request.go @@ -123,12 +123,23 @@ func (r *SyncGroupRequest) headerVersion() int16 { return 1 } +func (r *SyncGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { - switch { - case r.Version >= 3: + switch r.Version { + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: return V2_3_0_0 } - return V0_9_0_0 } func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go similarity index 77% rename from vendor/github.com/Shopify/sarama/sync_group_response.go rename to vendor/github.com/IBM/sarama/sync_group_response.go index 41b63b3d0362d..f7da15b4f1c10 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/IBM/sarama/sync_group_response.go @@ -1,9 +1,11 @@ package sarama +import "time" + type SyncGroupResponse struct { // Version defines the protocol version to use for encode and decode Version int16 - // ThrottleTimeMs contains the duration in milliseconds for which the + // ThrottleTime contains the duration in milliseconds for which the // request was throttled due to a quota violation, or zero if the request // did not violate any quota. ThrottleTime int32 @@ -57,10 +59,25 @@ func (r *SyncGroupResponse) headerVersion() int16 { return 0 } +func (r *SyncGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: return V2_3_0_0 } - return V0_9_0_0 +} + +func (r *SyncGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/sync_producer.go rename to vendor/github.com/IBM/sarama/sync_producer.go index 8765ac3368ef3..3119baa6d7f60 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/IBM/sarama/sync_producer.go @@ -33,7 +33,7 @@ type SyncProducer interface { // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag - // IsTransactional return true when current producer is is transactional. 
+ // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go similarity index 100% rename from vendor/github.com/Shopify/sarama/timestamp.go rename to vendor/github.com/IBM/sarama/timestamp.go diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go similarity index 91% rename from vendor/github.com/Shopify/sarama/transaction_manager.go rename to vendor/github.com/IBM/sarama/transaction_manager.go index e18abecd385e4..bf20b75e90570 100644 --- a/vendor/github.com/Shopify/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -14,7 +14,7 @@ type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota - // ProducerTxnFlagInitializing when txnmgr is initilizing + // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady @@ -22,7 +22,7 @@ const ( ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction - // ProducerTxnFlagInError whan having abortable or fatal error + // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction @@ -117,13 +117,13 @@ var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we need are initilizing + // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we have initilized transactional producer + // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, @@ -161,8 +161,10 @@ type topicPartition struct { } // to ensure that we don't do a full scan every time a partition or an offset is added. -type topicPartitionSet map[topicPartition]struct{} -type topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata +type ( + topicPartitionSet map[topicPartition]struct{} + topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata +) func (s topicPartitionSet) mapToRequest() map[string][]int32 { result := make(map[string][]int32, len(s)) @@ -315,12 +317,20 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, if err != nil { return true, err } - response, err := coordinator.AddOffsetsToTxn(&AddOffsetsToTxnRequest{ + request := &AddOffsetsToTxnRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, GroupID: groupId, - }) + } + if t.client.Config().Version.IsAtLeast(V2_7_0_0) { + // Version 2 adds the support for new error code PRODUCER_FENCED. + request.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + response, err := coordinator.AddOffsetsToTxn(request) if err != nil { // If an error occurred try to refresh current transaction coordinator. 
_ = coordinator.Close() @@ -390,13 +400,21 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, if err != nil { return resultOffsets, true, err } - responses, err := consumerGroupCoordinator.TxnOffsetCommit(&TxnOffsetCommitRequest{ + request := &TxnOffsetCommitRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, GroupID: groupId, Topics: offsets.mapToRequest(), - }) + } + if t.client.Config().Version.IsAtLeast(V2_1_0_0) { + // Version 2 adds the committed leader epoch. + request.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + responses, err := consumerGroupCoordinator.TxnOffsetCommit(request) if err != nil { _ = consumerGroupCoordinator.Close() _ = t.client.RefreshCoordinator(groupId) @@ -448,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } @@ -466,13 +484,24 @@ func (t *transactionManager) initProducerId() (int64, int16, error) { } if t.client.Config().Version.IsAtLeast(V2_5_0_0) { - req.Version = 3 + if t.client.Config().Version.IsAtLeast(V2_7_0_0) { + // Version 4 adds the support for new error code PRODUCER_FENCED. + req.Version = 4 + } else { + // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try + // to resume after an INVALID_PRODUCER_EPOCH error + req.Version = 3 + } isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch t.coordinatorSupportsBumpingEpoch = true req.ProducerID = t.producerID req.ProducerEpoch = t.producerEpoch } else if t.client.Config().Version.IsAtLeast(V2_4_0_0) { + // Version 2 is the first flexible version. req.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. + req.Version = 1 } if isEpochBump { @@ -540,9 +569,8 @@ func (t *transactionManager) initProducerId() (int64, int16, error) { return response.ProducerID, response.ProducerEpoch, false, nil } switch response.Err { - case ErrConsumerCoordinatorNotAvailable: - fallthrough - case ErrNotCoordinatorForConsumer: + // Retriable errors + case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress: if t.isTransactional() { _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) @@ -610,12 +638,20 @@ func (t *transactionManager) endTxn(commit bool) error { if err != nil { return true, err } - response, err := coordinator.EndTxn(&EndTxnRequest{ + request := &EndTxnRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, TransactionResult: commit, - }) + } + if t.client.Config().Version.IsAtLeast(V2_7_0_0) { + // Version 2 adds the support for new error code PRODUCER_FENCED. + request.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. 
+ request.Version = 1 + } + response, err := coordinator.EndTxn(request) if err != nil { // Always retry on network error _ = coordinator.Close() @@ -660,7 +696,7 @@ func (t *transactionManager) finishTransaction(commit bool) error { t.mutex.Lock() defer t.mutex.Unlock() - // Ensure no error when committing or abording + // Ensure no error when committing or aborting if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { @@ -779,13 +815,20 @@ func (t *transactionManager) publishTxnPartitions() error { if err != nil { return true, err } - addPartResponse, err := coordinator.AddPartitionsToTxn(&AddPartitionsToTxnRequest{ + request := &AddPartitionsToTxnRequest{ TransactionalID: t.transactionalID, ProducerID: t.producerID, ProducerEpoch: t.producerEpoch, TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(), - }) - + } + if t.client.Config().Version.IsAtLeast(V2_7_0_0) { + // Version 2 adds the support for new error code PRODUCER_FENCED. + request.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + addPartResponse, err := coordinator.AddPartitionsToTxn(request) if err != nil { _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go similarity index 73% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_request.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_request.go index c4043a33520d8..ca13afb3b2fbc 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go @@ -1,6 +1,7 @@ package sarama type TxnOffsetCommitRequest struct { + Version int16 TransactionalID string GroupID string ProducerID int64 @@ -29,7 +30,7 @@ func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { return err } for _, partition := range partitions { - if err := partition.encode(pe); err != nil { + if err := partition.encode(pe, t.Version); err != nil { return err } } @@ -39,6 +40,7 @@ func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { } func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + t.Version = version if t.TransactionalID, err = pd.getString(); err != nil { return err } @@ -88,26 +90,49 @@ func (a *TxnOffsetCommitRequest) key() int16 { } func (a *TxnOffsetCommitRequest) version() int16 { - return 0 + return a.Version } func (a *TxnOffsetCommitRequest) headerVersion() int16 { return 1 } +func (a *TxnOffsetCommitRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_1_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_1_0_0 + } } type PartitionOffsetMetadata struct { + // Partition contains the index of the partition within the topic. Partition int32 - Offset int64 - Metadata *string + // Offset contains the message offset to be committed. + Offset int64 + // LeaderEpoch contains the leader epoch of the last consumed record. + LeaderEpoch int32 + // Metadata contains any associated metadata the client wants to keep. 
+ Metadata *string } -func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { +func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error { pe.putInt32(p.Partition) pe.putInt64(p.Offset) + + if version >= 2 { + pe.putInt32(p.LeaderEpoch) + } + if err := pe.putNullableString(p.Metadata); err != nil { return err } @@ -122,6 +147,13 @@ func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err e if p.Offset, err = pd.getInt64(); err != nil { return err } + + if version >= 2 { + if p.LeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + if p.Metadata, err = pd.getNullableString(); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go similarity index 80% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_response.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_response.go index 94d8029dace61..d5144faf7761d 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go +++ b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go @@ -5,6 +5,7 @@ import ( ) type TxnOffsetCommitResponse struct { + Version int16 ThrottleTime time.Duration Topics map[string][]*PartitionError } @@ -33,6 +34,7 @@ func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { } func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + t.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -75,13 +77,30 @@ func (a *TxnOffsetCommitResponse) key() int16 { } func (a *TxnOffsetCommitResponse) version() int16 { - return 0 + return a.Version } func (a *TxnOffsetCommitResponse) headerVersion() int16 { return 0 } +func (a *TxnOffsetCommitResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_1_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_1_0_0 + } +} + +func (r *TxnOffsetCommitResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go similarity index 86% rename from vendor/github.com/Shopify/sarama/utils.go rename to vendor/github.com/IBM/sarama/utils.go index 819b6597cd39d..feadc0065bbe5 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -193,6 +193,12 @@ var ( V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) V3_3_1_0 = newKafkaVersion(3, 3, 1, 0) + V3_3_2_0 = newKafkaVersion(3, 3, 2, 0) + V3_4_0_0 = newKafkaVersion(3, 4, 0, 0) + V3_4_1_0 = newKafkaVersion(3, 4, 1, 0) + V3_5_0_0 = newKafkaVersion(3, 5, 0, 0) + V3_5_1_0 = newKafkaVersion(3, 5, 1, 0) + V3_6_0_0 = newKafkaVersion(3, 6, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -248,12 +254,18 @@ var ( V3_2_3_0, V3_3_0_0, V3_3_1_0, + V3_3_2_0, + V3_4_0_0, + V3_4_1_0, + V3_5_0_0, + V3_5_1_0, + V3_6_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_3_1_0 - DefaultVersion = V1_0_0_0 + MaxVersion = V3_6_0_0 + DefaultVersion = V2_1_0_0 - // reduced set of versions to matrix test + // reduced set of protocol versions to matrix test fvtRangeVersions = []KafkaVersion{ V0_8_2_2, V0_10_2_2, @@ -265,11 +277,19 @@ var ( V2_6_2_0, V2_8_2_0, V3_1_2_0, - V3_2_3_0, - V3_3_1_0, + V3_3_2_0, + V3_6_0_0, } ) +var ( + // This regex validates 
that a string complies with the pre kafka 1.0.0 format for version strings, for example 0.11.0.3 + validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`) + + // This regex validates that a string complies with the post Kafka 1.0.0 format, for example 1.0.0 + validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`) +) + // ParseKafkaVersion parses and returns kafka version or error from a string func ParseKafkaVersion(s string) (KafkaVersion, error) { if len(s) < 5 { @@ -278,9 +298,9 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) { var major, minor, veryMinor, patch uint var err error if s[0] == '0' { - err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) + err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) } else { - err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) + err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) } if err != nil { return DefaultVersion, err @@ -288,8 +308,8 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) { return newKafkaVersion(major, minor, veryMinor, patch), nil } -func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { - if !regexp.MustCompile(pattern).MatchString(s) { +func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error { + if !pattern.MatchString(s) { return fmt.Errorf("invalid version `%s`", s) } _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/IBM/sarama/version.go similarity index 100% rename from vendor/github.com/Shopify/sarama/version.go rename to vendor/github.com/IBM/sarama/version.go diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go similarity index 100% rename from vendor/github.com/Shopify/sarama/zstd.go rename to vendor/github.com/IBM/sarama/zstd.go diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index c2f92ec9a1012..0000000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,1187 +0,0 @@ -# Changelog - -## Version 1.31.1 (2022-02-01) - -- #2126 - @bai - Populate missing kafka versions -- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image -- #2123 - @bai - Update klauspost/compress to 0.14 -- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy -- #2119 - @bai - Add Kafka 3.1.0 version number -- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption -- #2051 - @seveas - Expose the TLS connection state of a broker connection -- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys -- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup -- #2113 - @mosceo - Fix typo - -## Version 1.31.0 (2022-01-18) - -## What's Changed -### :tada: New Features / Improvements -* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 -* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 -* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 -### :bug: Fixes -* fix(test): add fluent interface for mocks where missing by @grongor in 
https://github.com/Shopify/sarama/pull/2080 -* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 -* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 -* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 -* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 -* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 -* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 -* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 -### :wrench: Maintenance -* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 -### :memo: Documentation -* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 -### :heavy_plus_sign: Other Changes -* Fix typo by @mosceo in https://github.com/Shopify/sarama/pull/2084 - -## New Contributors -* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 -* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 -* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 -* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 - -## Version 1.30.1 (2021-12-04) - -## What's Changed -### :tada: New Features / Improvements -* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 -### :bug: Fixes -* fix: set min-go-version to 1.16 by @troyanov in https://github.com/Shopify/sarama/pull/2048 -* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 -* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 -* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 -### :wrench: Maintenance -* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 -* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 - -## Notes -* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x - -## New Contributors -* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 -* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 -* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 -* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 - -## Version 1.30.0 (2021-09-29) - -⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
- -**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 - ---- - -ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** - ---- - -# New Features / Improvements - -- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh -- #2000 - @matzew - Using xdg-go module for SCRAM -- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures -- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM -- #2006 - @faillefer - Add support for DeleteOffsets operation -- #1909 - @agriffaut - KIP-546 Client quota APIs -- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state -- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger -- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log -- #2019 - @dnwe - feat: add logging & a metric for producer throttle -- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface -- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol -- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open -- #2034 - @bai - Add support for kafka 3.0.0 - -# Fixes - -- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest -- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation -- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls -- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true -- #2007 - @bai - Add support for Go 1.17 -- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks -- #2010 - @dnwe - chore: enable exportloopref and misspell linters -- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements -- #2015 - @bai - Change default branch to main -- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() -- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 -- #2016 - @dnwe - chore: replace deprecated Go calls -- #2017 - @dnwe - chore: delete legacy vagrant script -- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test -- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 -- #2033 - @bai - Update dependencies -- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method -- #2035 - @dnwe - chore: populate the missing kafka versions -- #2038 - @dnwe - feat: add a fuzzing workflow to github actions - -## New Contributors -* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 -* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 -* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 -* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 -* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 -* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 -* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 -* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 -* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 -* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 - -**Full Changelog**: 
https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 - -## Version 1.29.1 (2021-06-24) - -# New Features / Improvements - -- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API -- #1964 - @ajanikow - Add DelegationToken ResourceType - -# Fixes - -- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire -- #1971 - @KerryJava - fix kafka-producer-performance throughput panic -- #1968 - @dnwe - chore: bump golang.org/x versions -- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers -- #1963 - @dnwe - fix: ensure backoff timer is re-used -- #1949 - @dnwe - fix: explicitly use uint64 for payload length - -## Version 1.29.0 (2021-05-07) - -### New Features / Improvements - -- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API -- #1869 - @wyndhblb - zstd: encode+decode performance improvements -- #1541 - @izolight - add String, (Un)MarshalText for acl types. -- #1921 - @bai - Add support for Kafka 2.8.0 - -### Fixes -- #1936 - @dnwe - fix(consumer): follow preferred broker -- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) -- #1926 - @dnwe - fix: correct initial CodeQL findings -- #1925 - @bai - Test out CodeQL -- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos -- #1922 - @bai - Update go dependencies -- #1898 - @mmaslankaprv - Parsing only known control batches value -- #1887 - @withshubh - Fix: issues affecting code quality - -## Version 1.28.0 (2021-02-15) - -**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** - -- #1870 - @kvch - Update Kerberos library to latest major -- #1876 - @bai - Update docs, reference pkg.go.dev -- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close -- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages -- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies -- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy -- #1862 - @bai - Fix CI setenv permissions issues -- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev -- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica - -## Version 1.27.2 (2020-10-21) - -### Improvements - -#1750 - @krantideep95 Adds missing mock responses for mocking consumer group - -## Fixes - -#1817 - reverts #1785 - Add private method to Client interface to prevent implementation - -## Version 1.27.1 (2020-10-07) - -### Improvements - -#1775 - @d1egoaz - Adds a Producer Interceptor example -#1781 - @justin-chen - Refresh brokers given list of seed brokers -#1784 - @justin-chen - Add randomize seed broker method -#1790 - @d1egoaz - remove example binary -#1798 - @bai - Test against Go 1.15 -#1785 - @justin-chen - Add private method to Client interface to prevent implementation -#1802 - @uvw - Support Go 1.13 error unwrapping - -## Fixes - -#1791 - @stanislavkozlovski - bump default version to 1.0.0 - -## Version 1.27.0 (2020-08-11) - -### Improvements - -#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration -#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests -#1699 - @wclaeys - Consumer group support for manually comitting offsets -#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 -#1726 - @d1egoaz - Include zstd on the functional tests -#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors -#1738 - @varun06 - fixed variable names that are named same as some std lib package names -#1741 - @varun06 - updated zstd dependency to latest v1.10.10 -#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base -#1763 - @alrs - remove deprecated tls options from test -#1769 - @bai - Add support for Kafka 2.6.0 - -## Fixes - -#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -#1744 - @alrs - Fix isBalanced Function Signature - -## Version 1.26.4 (2020-05-19) - -## Fixes - -- #1701 - @d1egoaz - Set server name only for the current broker -- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka - -## Version 1.26.3 (2020-05-07) - -## Fixes - -- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config - -## Version 1.26.2 (2020-05-06) - -## ⚠️ Known Issues - -This release has been marked as not ready for production and may be unstable, please use v1.26.4. 
- -### Improvements - -- #1560 - @iyacontrol - add sync pool for gzip 1-9 -- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs -- #1632 - @bai - Add support for Go 1.14 -- #1640 - @random-dwi - Feature/fix list partition reassignments -- #1646 - @mimaison - Add DescribeLogDirs to admin client -- #1667 - @bai - Add support for kafka 2.5.0 - -## Fixes - -- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 -- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine -- #1602 - @d1egoaz - adds a note about consumer groups Consume method -- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly -- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented -- #1614 - @alrs - produce_response.go: Remove Unused Functions -- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables -- #1639 - @agriffaut - Handle errors with no message but error code -- #1643 - @kzinglzy - fix `config.net.keepalive` -- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs -- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata -- #1650 - @lavoiesl - Return the response error in heartbeatLoop -- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die -- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. - -## Version 1.26.1 (2020-02-04) - -Improvements: -- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) -- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) -- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) -- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) - -Bug Fixes: -- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) -- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) - -## Version 1.26.0 (2020-01-24) - -New Features: -- Enable zstd compression - ([1574](https://github.com/Shopify/sarama/pull/1574), - [1582](https://github.com/Shopify/sarama/pull/1582)) -- Support headers in tools kafka-console-producer - ([1549](https://github.com/Shopify/sarama/pull/1549)) - -Improvements: -- Add SASL AuthIdentity to SASL frames (authzid) - ([1585](https://github.com/Shopify/sarama/pull/1585)). - -Bug Fixes: -- Sending messages with ZStd compression enabled fails in multiple ways - ([1252](https://github.com/Shopify/sarama/issues/1252)). -- Use the broker for any admin on BrokerConfig - ([1571](https://github.com/Shopify/sarama/pull/1571)). -- Set DescribeConfigRequest Version field - ([1576](https://github.com/Shopify/sarama/pull/1576)). -- ConsumerGroup flooding logs with client/metadata update req - ([1578](https://github.com/Shopify/sarama/pull/1578)). -- MetadataRequest version in DescribeCluster - ([1580](https://github.com/Shopify/sarama/pull/1580)). -- Fix deadlock in consumer group handleError - ([1581](https://github.com/Shopify/sarama/pull/1581)) -- Fill in the Fetch{Request,Response} protocol - ([1582](https://github.com/Shopify/sarama/pull/1582)). 
-- Retry topic request on ControllerNotAvailable - ([1586](https://github.com/Shopify/sarama/pull/1586)). - -## Version 1.25.0 (2020-01-13) - -New Features: -- Support TLS protocol in kafka-producer-performance - ([1538](https://github.com/Shopify/sarama/pull/1538)). -- Add support for kafka 2.4.0 - ([1552](https://github.com/Shopify/sarama/pull/1552)). - -Improvements: -- Allow the Consumer to disable auto-commit offsets - ([1164](https://github.com/Shopify/sarama/pull/1164)). -- Produce records with consistent timestamps - ([1455](https://github.com/Shopify/sarama/pull/1455)). - -Bug Fixes: -- Fix incorrect SetTopicMetadata name mentions - ([1534](https://github.com/Shopify/sarama/pull/1534)). -- Fix client.tryRefreshMetadata Println - ([1535](https://github.com/Shopify/sarama/pull/1535)). -- Fix panic on calling updateMetadata on closed client - ([1531](https://github.com/Shopify/sarama/pull/1531)). -- Fix possible faulty metrics in TestFuncProducing - ([1545](https://github.com/Shopify/sarama/pull/1545)). - -## Version 1.24.1 (2019-10-31) - -New Features: -- Add DescribeLogDirs Request/Response pair - ([1520](https://github.com/Shopify/sarama/pull/1520)). - -Bug Fixes: -- Fix ClusterAdmin returning invalid controller ID on DescribeCluster - ([1518](https://github.com/Shopify/sarama/pull/1518)). -- Fix issue with consumergroup not rebalancing when new partition is added - ([1525](https://github.com/Shopify/sarama/pull/1525)). -- Ensure consistent use of read/write deadlines - ([1529](https://github.com/Shopify/sarama/pull/1529)). - -## Version 1.24.0 (2019-10-09) - -New Features: -- Add sticky partition assignor - ([1416](https://github.com/Shopify/sarama/pull/1416)). -- Switch from cgo zstd package to pure Go implementation - ([1477](https://github.com/Shopify/sarama/pull/1477)). - -Improvements: -- Allow creating ClusterAdmin from client - ([1415](https://github.com/Shopify/sarama/pull/1415)). -- Set KafkaVersion in ListAcls method - ([1452](https://github.com/Shopify/sarama/pull/1452)). -- Set request version in CreateACL ClusterAdmin method - ([1458](https://github.com/Shopify/sarama/pull/1458)). -- Set request version in DeleteACL ClusterAdmin method - ([1461](https://github.com/Shopify/sarama/pull/1461)). -- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest - ([1464](https://github.com/Shopify/sarama/pull/1464)). -- Remove direct usage of gofork - ([1465](https://github.com/Shopify/sarama/pull/1465)). -- Add support for Go 1.13 - ([1478](https://github.com/Shopify/sarama/pull/1478)). -- Improve behavior of NewMockListAclsResponse - ([1481](https://github.com/Shopify/sarama/pull/1481)). - -Bug Fixes: -- Fix race condition in consumergroup example - ([1434](https://github.com/Shopify/sarama/pull/1434)). -- Fix brokerProducer goroutine leak - ([1442](https://github.com/Shopify/sarama/pull/1442)). -- Use released version of lz4 library - ([1469](https://github.com/Shopify/sarama/pull/1469)). -- Set correct version in MockDeleteTopicsResponse - ([1484](https://github.com/Shopify/sarama/pull/1484)). -- Fix CLI help message typo - ([1494](https://github.com/Shopify/sarama/pull/1494)). - -Known Issues: -- Please **don't** use Zstd, as it doesn't work right now. - See https://github.com/Shopify/sarama/issues/1252 - -## Version 1.23.1 (2019-07-22) - -Bug Fixes: -- Fix fetch delete bug record - ([1425](https://github.com/Shopify/sarama/pull/1425)). -- Handle SASL/OAUTHBEARER token rejection - ([1428](https://github.com/Shopify/sarama/pull/1428)). 
- -## Version 1.23.0 (2019-07-02) - -New Features: -- Add support for Kafka 2.3.0 - ([1418](https://github.com/Shopify/sarama/pull/1418)). -- Add support for ListConsumerGroupOffsets v2 - ([1374](https://github.com/Shopify/sarama/pull/1374)). -- Add support for DeleteConsumerGroup - ([1417](https://github.com/Shopify/sarama/pull/1417)). -- Add support for SASLVersion configuration - ([1410](https://github.com/Shopify/sarama/pull/1410)). -- Add kerberos support - ([1366](https://github.com/Shopify/sarama/pull/1366)). - -Improvements: -- Improve sasl_scram_client example - ([1406](https://github.com/Shopify/sarama/pull/1406)). -- Fix shutdown and race-condition in consumer-group example - ([1404](https://github.com/Shopify/sarama/pull/1404)). -- Add support for error codes 77—81 - ([1397](https://github.com/Shopify/sarama/pull/1397)). -- Pool internal objects allocated per message - ([1385](https://github.com/Shopify/sarama/pull/1385)). -- Reduce packet decoder allocations - ([1373](https://github.com/Shopify/sarama/pull/1373)). -- Support timeout when fetching metadata - ([1359](https://github.com/Shopify/sarama/pull/1359)). - -Bug Fixes: -- Fix fetch size integer overflow - ([1376](https://github.com/Shopify/sarama/pull/1376)). -- Handle and log throttled FetchResponses - ([1383](https://github.com/Shopify/sarama/pull/1383)). -- Refactor misspelled word Resouce to Resource - ([1368](https://github.com/Shopify/sarama/pull/1368)). - -## Version 1.22.1 (2019-04-29) - -Improvements: -- Use zstd 1.3.8 - ([1350](https://github.com/Shopify/sarama/pull/1350)). -- Add support for SaslHandshakeRequest v1 - ([1354](https://github.com/Shopify/sarama/pull/1354)). - -Bug Fixes: -- Fix V5 MetadataRequest nullable topics array - ([1353](https://github.com/Shopify/sarama/pull/1353)). -- Use a different SCRAM client for each broker connection - ([1349](https://github.com/Shopify/sarama/pull/1349)). -- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 - ([1344](https://github.com/Shopify/sarama/pull/1344)). - -## Version 1.22.0 (2019-04-09) - -New Features: -- Add Offline Replicas Operation to Client - ([1318](https://github.com/Shopify/sarama/pull/1318)). -- Allow using proxy when connecting to broker - ([1326](https://github.com/Shopify/sarama/pull/1326)). -- Implement ReadCommitted - ([1307](https://github.com/Shopify/sarama/pull/1307)). -- Add support for Kafka 2.2.0 - ([1331](https://github.com/Shopify/sarama/pull/1331)). -- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes - ([1331](https://github.com/Shopify/sarama/pull/1295)). - -Improvements: -- Unregister all broker metrics on broker stop - ([1232](https://github.com/Shopify/sarama/pull/1232)). -- Add SCRAM authentication example - ([1303](https://github.com/Shopify/sarama/pull/1303)). -- Add consumergroup examples - ([1304](https://github.com/Shopify/sarama/pull/1304)). -- Expose consumer batch size metric - ([1296](https://github.com/Shopify/sarama/pull/1296)). -- Add TLS options to console producer and consumer - ([1300](https://github.com/Shopify/sarama/pull/1300)). -- Reduce client close bookkeeping - ([1297](https://github.com/Shopify/sarama/pull/1297)). -- Satisfy error interface in create responses - ([1154](https://github.com/Shopify/sarama/pull/1154)). -- Please lint gods - ([1346](https://github.com/Shopify/sarama/pull/1346)). - -Bug Fixes: -- Fix multi consumer group instance crash - ([1338](https://github.com/Shopify/sarama/pull/1338)). 
-- Update lz4 to latest version - ([1347](https://github.com/Shopify/sarama/pull/1347)). -- Retry ErrNotCoordinatorForConsumer in new consumergroup session - ([1231](https://github.com/Shopify/sarama/pull/1231)). -- Fix cleanup error handler - ([1332](https://github.com/Shopify/sarama/pull/1332)). -- Fix rate condition in PartitionConsumer - ([1156](https://github.com/Shopify/sarama/pull/1156)). - -## Version 1.21.0 (2019-02-24) - -New Features: -- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest - ([1236](https://github.com/Shopify/sarama/pull/1236)). -- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests - ([1178](https://github.com/Shopify/sarama/pull/1178)). -- Implement SASL/OAUTHBEARER - ([1240](https://github.com/Shopify/sarama/pull/1240)). - -Improvements: -- Add Go mod support - ([1282](https://github.com/Shopify/sarama/pull/1282)). -- Add error codes 73—76 - ([1239](https://github.com/Shopify/sarama/pull/1239)). -- Add retry backoff function - ([1160](https://github.com/Shopify/sarama/pull/1160)). -- Maintain metadata in the producer even when retries are disabled - ([1189](https://github.com/Shopify/sarama/pull/1189)). -- Include ReplicaAssignment in ListTopics - ([1274](https://github.com/Shopify/sarama/pull/1274)). -- Add producer performance tool - ([1222](https://github.com/Shopify/sarama/pull/1222)). -- Add support LogAppend timestamps - ([1258](https://github.com/Shopify/sarama/pull/1258)). - -Bug Fixes: -- Fix potential deadlock when a heartbeat request fails - ([1286](https://github.com/Shopify/sarama/pull/1286)). -- Fix consuming compacted topic - ([1227](https://github.com/Shopify/sarama/pull/1227)). -- Set correct Kafka version for DescribeConfigsRequest v1 - ([1277](https://github.com/Shopify/sarama/pull/1277)). -- Update kafka test version - ([1273](https://github.com/Shopify/sarama/pull/1273)). - -## Version 1.20.1 (2019-01-10) - -New Features: -- Add optional replica id in offset request - ([1100](https://github.com/Shopify/sarama/pull/1100)). - -Improvements: -- Implement DescribeConfigs Request + Response v1 & v2 - ([1230](https://github.com/Shopify/sarama/pull/1230)). -- Reuse compression objects - ([1185](https://github.com/Shopify/sarama/pull/1185)). -- Switch from png to svg for GoDoc link in README - ([1243](https://github.com/Shopify/sarama/pull/1243)). -- Fix typo in deprecation notice for FetchResponseBlock.Records - ([1242](https://github.com/Shopify/sarama/pull/1242)). -- Fix typos in consumer metadata response file - ([1244](https://github.com/Shopify/sarama/pull/1244)). - -Bug Fixes: -- Revert to individual msg retries for non-idempotent - ([1203](https://github.com/Shopify/sarama/pull/1203)). -- Respect MaxMessageBytes limit for uncompressed messages - ([1141](https://github.com/Shopify/sarama/pull/1141)). - -## Version 1.20.0 (2018-12-10) - -New Features: - - Add support for zstd compression - ([#1170](https://github.com/Shopify/sarama/pull/1170)). - - Add support for Idempotent Producer - ([#1152](https://github.com/Shopify/sarama/pull/1152)). - - Add support support for Kafka 2.1.0 - ([#1229](https://github.com/Shopify/sarama/pull/1229)). - - Add support support for OffsetCommit request/response pairs versions v1 to v5 - ([#1201](https://github.com/Shopify/sarama/pull/1201)). - - Add support support for OffsetFetch request/response pair up to version v5 - ([#1198](https://github.com/Shopify/sarama/pull/1198)). 
- -Improvements: - - Export broker's Rack setting - ([#1173](https://github.com/Shopify/sarama/pull/1173)). - - Always use latest patch version of Go on CI - ([#1202](https://github.com/Shopify/sarama/pull/1202)). - - Add error codes 61 to 72 - ([#1195](https://github.com/Shopify/sarama/pull/1195)). - -Bug Fixes: - - Fix build without cgo - ([#1182](https://github.com/Shopify/sarama/pull/1182)). - - Fix go vet suggestion in consumer group file - ([#1209](https://github.com/Shopify/sarama/pull/1209)). - - Fix typos in code and comments - ([#1228](https://github.com/Shopify/sarama/pull/1228)). - -## Version 1.19.0 (2018-09-27) - -New Features: - - Implement a higher-level consumer group - ([#1099](https://github.com/Shopify/sarama/pull/1099)). - -Improvements: - - Add support for Go 1.11 - ([#1176](https://github.com/Shopify/sarama/pull/1176)). - -Bug Fixes: - - Fix encoding of `MetadataResponse` with version 2 and higher - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - - Fix race condition in mock async producer - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - -## Version 1.18.0 (2018-09-07) - -New Features: - - Make `Partitioner.RequiresConsistency` vary per-message - ([#1112](https://github.com/Shopify/sarama/pull/1112)). - - Add customizable partitioner - ([#1118](https://github.com/Shopify/sarama/pull/1118)). - - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, - `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` - ([#1055](https://github.com/Shopify/sarama/pull/1055)). - -Improvements: - - Add support for Kafka 2.0.0 - ([#1149](https://github.com/Shopify/sarama/pull/1149)). - - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts - ([#1123](https://github.com/Shopify/sarama/pull/1123)). - - Simpler offset management - ([#1127](https://github.com/Shopify/sarama/pull/1127)). - -Bug Fixes: - - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka - ([#1110](https://github.com/Shopify/sarama/pull/1110)). - - Fix consumer block when response did not contain all the - expected topic/partition blocks - ([#1086](https://github.com/Shopify/sarama/pull/1086)). - - Fix consumer block when response contains only constrol messages - ([#1115](https://github.com/Shopify/sarama/pull/1115)). - - Add timeout config for ClusterAdmin requests - ([#1142](https://github.com/Shopify/sarama/pull/1142)). - - Add version check when producing message with headers - ([#1117](https://github.com/Shopify/sarama/pull/1117)). - - Fix `MetadataRequest` for empty list of topics - ([#1132](https://github.com/Shopify/sarama/pull/1132)). - - Fix producer topic metadata on-demand fetch when topic error happens in metadata response - ([#1125](https://github.com/Shopify/sarama/pull/1125)). - -## Version 1.17.0 (2018-05-30) - -New Features: - - Add support for gzip compression levels - ([#1044](https://github.com/Shopify/sarama/pull/1044)). - - Add support for Metadata request/response pairs versions v1 to v5 - ([#1047](https://github.com/Shopify/sarama/pull/1047), - [#1069](https://github.com/Shopify/sarama/pull/1069)). - - Add versioning to JoinGroup request/response pairs - ([#1098](https://github.com/Shopify/sarama/pull/1098)) - - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs - ([#1065](https://github.com/Shopify/sarama/pull/1065), - [#1096](https://github.com/Shopify/sarama/pull/1096), - [#1027](https://github.com/Shopify/sarama/pull/1027)). 
- - Add `Controller()` method to Client interface - ([#1063](https://github.com/Shopify/sarama/pull/1063)). - -Improvements: - - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp - ([#1010](https://github.com/Shopify/sarama/pull/1010)). - - Expose missing protocol parts: `msgSet` and `recordBatch` - ([#1049](https://github.com/Shopify/sarama/pull/1049)). - - Add support for v1 DeleteTopics Request - ([#1052](https://github.com/Shopify/sarama/pull/1052)). - - Add support for Go 1.10 - ([#1064](https://github.com/Shopify/sarama/pull/1064)). - - Claim support for Kafka 1.1.0 - ([#1073](https://github.com/Shopify/sarama/pull/1073)). - -Bug Fixes: - - Fix FindCoordinatorResponse.encode to allow nil Coordinator - ([#1050](https://github.com/Shopify/sarama/pull/1050), - [#1051](https://github.com/Shopify/sarama/pull/1051)). - - Clear all metadata when we have the latest topic info - ([#1033](https://github.com/Shopify/sarama/pull/1033)). - - Make `PartitionConsumer.Close` idempotent - ([#1092](https://github.com/Shopify/sarama/pull/1092)). - -## Version 1.16.0 (2018-02-12) - -New Features: - - Add support for the Create/Delete Topics request/response pairs - ([#1007](https://github.com/Shopify/sarama/pull/1007), - [#1008](https://github.com/Shopify/sarama/pull/1008)). - - Add support for the Describe/Create/Delete ACL request/response pairs - ([#1009](https://github.com/Shopify/sarama/pull/1009)). - - Add support for the five transaction-related request/response pairs - ([#1016](https://github.com/Shopify/sarama/pull/1016)). - -Improvements: - - Permit setting version on mock producer responses - ([#999](https://github.com/Shopify/sarama/pull/999)). - - Add `NewMockBrokerListener` helper for testing TLS connections - ([#1019](https://github.com/Shopify/sarama/pull/1019)). - - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB - which results in much higher throughput in most cases - ([#1024](https://github.com/Shopify/sarama/pull/1024)). - - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to - reduce CPU and memory usage when processing many partitions - ([#1028](https://github.com/Shopify/sarama/pull/1028)). - - Assign relative offsets to messages in the producer to save the brokers a - recompression pass - ([#1002](https://github.com/Shopify/sarama/pull/1002), - [#1015](https://github.com/Shopify/sarama/pull/1015)). - -Bug Fixes: - - Fix producing uncompressed batches with the new protocol format - ([#1032](https://github.com/Shopify/sarama/issues/1032)). - - Fix consuming compacted topics with the new protocol format - ([#1005](https://github.com/Shopify/sarama/issues/1005)). - - Fix consuming topics with a mix of protocol formats - ([#1021](https://github.com/Shopify/sarama/issues/1021)). - - Fix consuming when the broker includes multiple batches in a single response - ([#1022](https://github.com/Shopify/sarama/issues/1022)). - - Fix detection of `PartialTrailingMessage` when the partial message was - truncated before the magic value indicating its version - ([#1030](https://github.com/Shopify/sarama/pull/1030)). - - Fix expectation-checking in the mock of `SyncProducer.SendMessages` - ([#1035](https://github.com/Shopify/sarama/pull/1035)). - -## Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). 
- - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). - -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -## Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). - -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -## Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -## Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
- - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). - - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -## Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). - - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -## Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). 
- -## Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -## Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). 
- -## Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -## Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -## Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -## Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -## Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). 
- -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -## Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -## Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -## Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -## Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -## Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
- This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -## Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -## Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -## Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. -- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. 
-- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/Shopify/sarama/Dockerfile.kafka deleted file mode 100644 index 48a9c178ae676..0000000000000 --- a/vendor/github.com/Shopify/sarama/Dockerfile.kafka +++ /dev/null @@ -1,27 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:latest - -USER root - -RUN microdnf update \ - && microdnf install curl gzip java-11-openjdk-headless tar \ - && microdnf clean all - -ENV JAVA_HOME=/usr/lib/jvm/jre-11 - -# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html -# Ensure Java doesn't cache any dns results -RUN cd /etc/java/java-11-openjdk/*/conf/security \ - && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ - && echo 'networkaddress.cache.ttl=0' >> java.security \ - && echo 'networkaddress.cache.negative.ttl=0' >> java.security - -# https://github.com/apache/kafka/blob/53eeaad946cd053e9eb1a762972d4efeacb8e4fc/tests/docker/Dockerfile#L65-L69 -ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" -RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" -RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" -RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" -RUN mkdir -p "/opt/kafka-3.3.1" && chmod a+rw /opt/kafka-3.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.1" - -COPY entrypoint.sh / - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go deleted file mode 100644 index aa7fb749868be..0000000000000 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "sync" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4/v4" -) - -var ( - lz4ReaderPool = sync.Pool{ - New: func() interface{} { - return lz4.NewReader(nil) - }, - } - - gzipReaderPool sync.Pool -) - -func decompress(cc CompressionCodec, data []byte) ([]byte, error) { - switch cc { - case CompressionNone: - return data, nil - case CompressionGZIP: - var err error - reader, ok := gzipReaderPool.Get().(*gzip.Reader) - if !ok { - reader, err = gzip.NewReader(bytes.NewReader(data)) - } else { - err = reader.Reset(bytes.NewReader(data)) - } - - if err != nil { - return nil, err - } - - defer gzipReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionSnappy: - return snappy.Decode(data) - case CompressionLZ4: - reader, ok := lz4ReaderPool.Get().(*lz4.Reader) - if !ok { - reader = lz4.NewReader(bytes.NewReader(data)) - } else { - reader.Reset(bytes.NewReader(data)) - } - defer lz4ReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionZSTD: - return 
zstdDecompress(ZstdDecoderParams{}, nil, data) - default: - return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} - } -} diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/Shopify/sarama/entrypoint.sh deleted file mode 100644 index 8cd2efcb955ba..0000000000000 --- a/vendor/github.com/Shopify/sarama/entrypoint.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -KAFKA_VERSION="${KAFKA_VERSION:-3.3.1}" -KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" - -if [ ! -d "${KAFKA_HOME}" ]; then - echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME - exit 1 -fi - -cd "${KAFKA_HOME}" || exit 1 - -# discard all empty/commented lines -sed -e '/^#/d' -e '/^$/d' -i"" config/server.properties - -# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka -for var in "${!KAFKA_CFG_@}"; do - key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" - sed -e '/^'$key'/d' -i"" config/server.properties - value="${!var}" - echo "$key=$value" >>config/server.properties -done - -sort config/server.properties - -exec bin/kafka-server-start.sh config/server.properties diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go deleted file mode 100644 index 4553b2d2ea069..0000000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ /dev/null @@ -1,27 +0,0 @@ -package sarama - -type ListGroupsRequest struct{} - -func (r *ListGroupsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ListGroupsRequest) key() int16 { - return 16 -} - -func (r *ListGroupsRequest) version() int16 { - return 0 -} - -func (r *ListGroupsRequest) headerVersion() int16 { - return 1 -} - -func (r *ListGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go deleted file mode 100644 index 777bae7e63ea9..0000000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -type ListGroupsResponse struct { - Err KError - Groups map[string]string -} - -func (r *ListGroupsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - for groupId, protocolType := range r.Groups { - if err := pe.putString(groupId); err != nil { - return err - } - if err := pe.putString(protocolType); err != nil { - return err - } - } - - return nil -} - -func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Groups = make(map[string]string) - for i := 0; i < n; i++ { - groupId, err := pd.getString() - if err != nil { - return err - } - protocolType, err := pd.getString() - if err != nil { - return err - } - - r.Groups[groupId] = protocolType - } - - return nil -} - -func (r *ListGroupsResponse) key() int16 { - return 16 -} - -func (r *ListGroupsResponse) version() int16 { - return 0 -} - -func (r *ListGroupsResponse) headerVersion() int16 { - return 0 -} - -func (r *ListGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} 
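A note on the `utils.go` hunk above: the two version-string patterns are hoisted into package-level `regexp.MustCompile` variables, so `ParseKafkaVersion` validates against precompiled regexes instead of recompiling a pattern on every call. A minimal usage sketch of that exported entry point (import path per this rename; the version string is an arbitrary example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// Post-1.0 Kafka versions use three components ("3.6.0"); pre-1.0
	// versions used four ("0.11.0.3"). The leading character selects which
	// precompiled regex validates the string before fmt.Sscanf parses it.
	v, err := sarama.ParseKafkaVersion("3.6.0")
	if err != nil {
		log.Fatal(err) // invalid input returns DefaultVersion plus an error
	}

	cfg := sarama.NewConfig()
	cfg.Version = v // gates which protocol versions the client negotiates
	fmt.Println(v)  // 3.6.0
}
```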
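The protocol files in this stretch (deleted here, re-homed under the IBM path) share one versioning convention: a field added at a newer API version, such as `LeaderEpoch` (v2+) on `PartitionOffsetMetadata` earlier or `AllowAutoTopicCreation` (v4+) on the `MetadataRequest` just below, is written and read only when the negotiated version permits, while `requiredVersion()` maps each wire version back to the minimum `KafkaVersion` that speaks it. A self-contained sketch of the gating pattern, with toy types standing in for sarama's real `packetEncoder`/`packetDecoder` interfaces:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// partitionOffset is a toy stand-in for a versioned protocol struct.
type partitionOffset struct {
	Partition   int32
	Offset      int64
	LeaderEpoch int32 // on the wire only for version >= 2
}

// encode appends big-endian fields; writes to a bytes.Buffer cannot fail.
func (p *partitionOffset) encode(buf *bytes.Buffer, version int16) {
	binary.Write(buf, binary.BigEndian, p.Partition)
	binary.Write(buf, binary.BigEndian, p.Offset)
	if version >= 2 {
		binary.Write(buf, binary.BigEndian, p.LeaderEpoch) // gated field
	}
}

func (p *partitionOffset) decode(r *bytes.Reader, version int16) error {
	if err := binary.Read(r, binary.BigEndian, &p.Partition); err != nil {
		return err
	}
	if err := binary.Read(r, binary.BigEndian, &p.Offset); err != nil {
		return err
	}
	if version >= 2 {
		return binary.Read(r, binary.BigEndian, &p.LeaderEpoch)
	}
	return nil
}

func main() {
	in := partitionOffset{Partition: 3, Offset: 42, LeaderEpoch: 7}
	var buf bytes.Buffer
	in.encode(&buf, 2)

	var out partitionOffset
	if err := out.decode(bytes.NewReader(buf.Bytes()), 2); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Partition:3 Offset:42 LeaderEpoch:7}
}
```

A v0 or v1 peer simply never sees the extra four bytes, which is what lets the updated structs keep working against older brokers.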
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
deleted file mode 100644
index a1b6ac09cb0d3..0000000000000
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
-	// Version defines the protocol version to use for encode and decode
-	Version int16
-	// Topics contains the topics to fetch metadata for.
-	Topics []string
-	// AllowAutoTopicCreation; if this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.
-	AllowAutoTopicCreation bool
-}
-
-func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest {
-	m := &MetadataRequest{Topics: topics}
-	if version.IsAtLeast(V2_1_0_0) {
-		m.Version = 7
-	} else if version.IsAtLeast(V2_0_0_0) {
-		m.Version = 6
-	} else if version.IsAtLeast(V1_0_0_0) {
-		m.Version = 5
-	} else if version.IsAtLeast(V0_10_0_0) {
-		m.Version = 1
-	}
-	return m
-}
-
-func (r *MetadataRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 12 {
-		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
-	}
-	if r.Version == 0 || len(r.Topics) > 0 {
-		err := pe.putArrayLength(len(r.Topics))
-		if err != nil {
-			return err
-		}
-
-		for i := range r.Topics {
-			err = pe.putString(r.Topics[i])
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		pe.putInt32(-1)
-	}
-
-	if r.Version >= 4 {
-		pe.putBool(r.AllowAutoTopicCreation)
-	}
-
-	return nil
-}
-
-func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	size, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if size > 0 {
-		r.Topics = make([]string, size)
-		for i := range r.Topics {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			r.Topics[i] = topic
-		}
-	}
-
-	if r.Version >= 4 {
-		if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *MetadataRequest) key() int16 {
-	return 3
-}
-
-func (r *MetadataRequest) version() int16 {
-	return r.Version
-}
-
-func (r *MetadataRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *MetadataRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_0_0
-	case 2:
-		return V0_10_1_0
-	case 3, 4:
-		return V0_11_0_0
-	case 5:
-		return V1_0_0_0
-	case 6:
-		return V2_0_0_0
-	case 7:
-		return V2_1_0_0
-	default:
-		return MinVersion
-	}
-}
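Note: NewMetadataRequest, shown in the deleted file above and also exported by github.com/IBM/sarama, pins a request's wire version to what the configured cluster version supports. A minimal usage sketch follows; it assumes the IBM fork keeps the same constructor signature and a compatible version ladder, so the exact Version values may differ in newer releases.

// Constructing MetadataRequests for two cluster versions. The topic name
// "loki-logs" is a made-up example.
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Against a >= 2.1.0 cluster, the ladder in the deleted file selects
	// wire version 7; newer sarama releases may pick a higher version.
	req := sarama.NewMetadataRequest(sarama.V2_1_0_0, []string{"loki-logs"})
	fmt.Println(req.Version)

	// Passing nil topics asks for metadata on all topics; per the encode
	// method above, versions >= 1 send this as a null (-1 length) array.
	all := sarama.NewMetadataRequest(sarama.V0_10_0_0, nil)
	fmt.Println(all.Version)
}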
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
index 2d1b3d93225d8..76f50073976cb 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/README.md
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -1,7 +1,7 @@
 circuit-breaker
 ===============
 
-[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![Golang CI](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml/badge.svg)](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml)
 [![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
 [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
 
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
index f88ca7248b0fd..9214386254f49 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -12,10 +12,13 @@ import (
 // because the breaker is currently open.
 var ErrBreakerOpen = errors.New("circuit breaker is open")
 
+// State is a type representing the possible states of a circuit breaker.
+type State uint32
+
 const (
-	closed uint32 = iota
-	open
-	halfOpen
+	Closed State = iota
+	Open
+	HalfOpen
 )
 
 // Breaker implements the circuit-breaker resiliency pattern
@@ -24,7 +27,7 @@ type Breaker struct {
 	timeout                          time.Duration
 
 	lock              sync.Mutex
-	state             uint32
+	state             State
 	errors, successes int
 	lastError         time.Time
 }
@@ -46,9 +49,9 @@ func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
 // already open, or it will run the given function and pass along its return
 // value. It is safe to call Run concurrently on the same Breaker.
 func (b *Breaker) Run(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -61,9 +64,9 @@ func (b *Breaker) Run(work func() error) error {
 // the return value of the function. It is safe to call Go concurrently on the
 // same Breaker.
 func (b *Breaker) Go(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -75,7 +78,13 @@ func (b *Breaker) Go(work func() error) error {
 	return nil
 }
 
-func (b *Breaker) doWork(state uint32, work func() error) error {
+// GetState returns the current State of the circuit-breaker at the moment
+// that it is called.
+func (b *Breaker) GetState() State {
+	return (State)(atomic.LoadUint32((*uint32)(&b.state)))
+}
+
+func (b *Breaker) doWork(state State, work func() error) error {
 	var panicValue interface{}
 
 	result := func() error {
@@ -85,7 +94,7 @@ func (b *Breaker) doWork(state uint32, work func() error) error {
 		return work()
 	}()
 
-	if result == nil && panicValue == nil && state == closed {
+	if result == nil && panicValue == nil && state == Closed {
 		// short-circuit the normal, success path without contending
 		// on the lock
 		return nil
@@ -108,7 +117,7 @@ func (b *Breaker) processResult(result error, panicValue interface{}) {
 	defer b.lock.Unlock()
 
 	if result == nil && panicValue == nil {
-		if b.state == halfOpen {
+		if b.state == HalfOpen {
 			b.successes++
 			if b.successes == b.successThreshold {
 				b.closeBreaker()
@@ -123,26 +132,26 @@ func (b *Breaker) processResult(result error, panicValue interface{}) {
 		}
 
 		switch b.state {
-		case closed:
+		case Closed:
 			b.errors++
 			if b.errors == b.errorThreshold {
 				b.openBreaker()
 			} else {
 				b.lastError = time.Now()
 			}
-		case halfOpen:
+		case HalfOpen:
 			b.openBreaker()
 		}
 	}
}
 
 func (b *Breaker) openBreaker() {
-	b.changeState(open)
+	b.changeState(Open)
 	go b.timer()
 }
 
 func (b *Breaker) closeBreaker() {
-	b.changeState(closed)
+	b.changeState(Closed)
 }
 
 func (b *Breaker) timer() {
@@ -151,11 +160,11 @@ func (b *Breaker) timer() {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
-	b.changeState(halfOpen)
+	b.changeState(HalfOpen)
 }
 
-func (b *Breaker) changeState(newState uint32) {
+func (b *Breaker) changeState(newState State) {
 	b.errors = 0
 	b.successes = 0
-	atomic.StoreUint32(&b.state, newState)
+	atomic.StoreUint32((*uint32)(&b.state), (uint32)(newState))
 }
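Note: the go-resiliency v1.7.0 bump above exports the breaker states and adds GetState, so callers can observe a breaker's condition without submitting work. A minimal usage sketch against the API shown in the hunk; the thresholds and timeout are arbitrary example values.

// Drive a breaker open with repeated failures, then inspect its state
// via the newly exported GetState/Open identifiers.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// Open after 3 errors; close again after 1 success while half-open;
	// transition to half-open 5 seconds after opening.
	b := breaker.New(3, 1, 5*time.Second)

	for i := 0; i < 5; i++ {
		err := b.Run(func() error {
			return errors.New("downstream failure")
		})
		if err == breaker.ErrBreakerOpen {
			// Once the breaker opens, work is rejected without running.
			fmt.Println("breaker open, skipping call")
		}
	}

	// New in v1.7.0: observe the state without running any work.
	if b.GetState() == breaker.Open {
		fmt.Println("breaker is open")
	}
}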
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
index e2c1714ff7e32..c2eb205070246 100644
--- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -83,13 +83,23 @@ func Decode(src []byte) ([]byte, error) {
 // for use by this function. If `dst` is nil *or* insufficiently large to hold
 // the decoded `src`, new space will be allocated.
 func DecodeInto(dst, src []byte) ([]byte, error) {
+	if len(src) < 8 || !bytes.Equal(src[:8], xerialHeader) {
+		dst, err := master.Decode(dst[:cap(dst)], src)
+		if err != nil && len(src) < len(xerialHeader) {
+			// Keep compatibility and return ErrMalformed when there is a
+			// short or truncated header.
+			return nil, ErrMalformed
+		}
+		return dst, err
+	}
+
 	var max = len(src)
 
 	if max < len(xerialHeader) {
 		return nil, ErrMalformed
 	}
 
-	if !bytes.Equal(src[:8], xerialHeader) {
-		return master.Decode(dst[:cap(dst)], src)
+	if max == sizeOffset {
+		return []byte{}, nil
 	}
 
 	if max < sizeOffset+sizeBytes {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e546d6d13361f..024d0099312bd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -261,6 +261,9 @@ github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml
 github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil
 github.com/IBM/ibm-cos-sdk-go/service/s3
 github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface
+# github.com/IBM/sarama v1.43.3
+## explicit; go 1.19
+github.com/IBM/sarama
 # github.com/Masterminds/goutils v1.1.1
 ## explicit
 github.com/Masterminds/goutils
@@ -280,9 +283,6 @@ github.com/Microsoft/go-winio/pkg/guid
 # github.com/NYTimes/gziphandler v1.1.1
 ## explicit; go 1.11
 github.com/NYTimes/gziphandler
-# github.com/Shopify/sarama v1.38.1
-## explicit; go 1.17
-github.com/Shopify/sarama
 # github.com/Workiva/go-datastructures v1.1.5
 ## explicit; go 1.15
 github.com/Workiva/go-datastructures/rangetree
@@ -667,10 +667,10 @@ github.com/drone/envsubst/path
 # github.com/dustin/go-humanize v1.0.1
 ## explicit; go 1.16
 github.com/dustin/go-humanize
-# github.com/eapache/go-resiliency v1.3.0
+# github.com/eapache/go-resiliency v1.7.0
 ## explicit; go 1.13
 github.com/eapache/go-resiliency/breaker
-# github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6
+# github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3
 ## explicit; go 1.17
 github.com/eapache/go-xerial-snappy
 # github.com/eapache/queue v1.1.0
@@ -1171,7 +1171,7 @@ github.com/jcmturner/dnsutils/v2
 ## explicit; go 1.7
 github.com/jcmturner/gofork/encoding/asn1
 github.com/jcmturner/gofork/x/crypto/pbkdf2
-# github.com/jcmturner/gokrb5/v8 v8.4.3
+# github.com/jcmturner/gokrb5/v8 v8.4.4
 ## explicit; go 1.16
 github.com/jcmturner/gokrb5/v8/asn1tools
 github.com/jcmturner/gokrb5/v8/client
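Note: the go-xerial-snappy bump above changes DecodeInto to fall back to plain (unframed) snappy before the xerial header checks, while still reporting ErrMalformed for short, truncated input. A small sketch of the exported API follows; it assumes Encode produces the unframed format (which it does in this package), and the buffer size is an arbitrary example value.

// Decode into a reusable scratch buffer, and show the ErrMalformed
// compatibility path preserved by the patch above.
package main

import (
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	payload := []byte("some kafka record batch bytes")
	encoded := snappy.Encode(payload) // raw snappy, no xerial framing

	// Reuse one scratch buffer across decodes to avoid per-message
	// allocations; DecodeInto grows it only if it is too small.
	scratch := make([]byte, 0, 1024)
	decoded, err := snappy.DecodeInto(scratch, encoded)
	fmt.Println(string(decoded), err)

	// Input shorter than the 8-byte xerial header that also fails raw
	// snappy decoding still reports ErrMalformed, as the hunk above notes.
	_, err = snappy.DecodeInto(scratch, []byte{0x82})
	fmt.Println(err == snappy.ErrMalformed) // true
}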