pax_global_header00006660000000000000000000000064135600447430014520gustar00rootroot0000000000000052 comment=3fc0c996f834c4f69dddbde6d9f5aef892dbec1b kafka-2.1.1/000077500000000000000000000000001356004474300125765ustar00rootroot00000000000000kafka-2.1.1/.circleci/000077500000000000000000000000001356004474300144315ustar00rootroot00000000000000kafka-2.1.1/.circleci/config.yml000066400000000000000000000010021356004474300164120ustar00rootroot00000000000000# Golang CircleCI 2.0 configuration file # Check https://circleci.com/docs/2.0/language-go/ for more details version: 2 jobs: build: docker: # specify the version - image: circleci/golang:1.12 working_directory: /go/src/github.com/optiopay/kafka steps: - checkout - run: go get github.com/kisielk/errcheck - run: go get -v -t -d ./... - run: go vet ./... - run: errcheck ./... - run: go test -v -race -timeout=5m $(go list ./... | grep -v integration) kafka-2.1.1/.github/000077500000000000000000000000001356004474300141365ustar00rootroot00000000000000kafka-2.1.1/.github/workflows/000077500000000000000000000000001356004474300161735ustar00rootroot00000000000000kafka-2.1.1/.github/workflows/pr.yml000066400000000000000000000013661356004474300173450ustar00rootroot00000000000000name: Kafka on: pull_request: branches: - master jobs: test: runs-on: ubuntu-latest strategy: matrix: go: ["1.12.x"] name: Go ${{ matrix.go }} test steps: - name: Checkout code uses: actions/checkout@v1 - name: Setup go uses: actions/setup-go@v1 with: go-version: ${{ matrix.go }} - name: Run tests for v2 run: cd v2 && go test ./... env: GO111MODULE: on lint: runs-on: ubuntu-latest name: Lint steps: - name: Checkout code uses: actions/checkout@v1 - name: golangci-lint uses: docker://reviewdog/action-golangci-lint:v1 with: github_token: ${{ secrets.github_token }} kafka-2.1.1/.gitignore000066400000000000000000000000071356004474300145630ustar00rootroot00000000000000!.git* kafka-2.1.1/CODEOWNERS000066400000000000000000000002311356004474300141650ustar00rootroot00000000000000* samuel.el-borai@optiopay.com sergei.tiutnev@optiopay.com mikhail.kochegarov@optiopay.com maksim.zhylinski@optiopay.com max.lavrenov@optiopay.com kafka-2.1.1/LICENSE000066400000000000000000000020411356004474300136000ustar00rootroot00000000000000Copyright (c) 2015-2018 Optiopay Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
kafka-2.1.1/README.md000066400000000000000000000053201356004474300140550ustar00rootroot00000000000000[![Tests status](https://github.com/optiopay/kafka/workflows/Kafka/badge.svg)](https://github.com/optiopay/kafka/actions?query=workflow%3AKafka)
[![GoDoc](https://godoc.org/github.com/optiopay/kafka/v2?status.png)](https://godoc.org/github.com/optiopay/kafka/v2)

# Kafka

Kafka is a Go client library for the [Apache Kafka](https://kafka.apache.org/) server, released under the [MIT license](LICENSE).

Kafka provides a minimal abstraction over the wire protocol, support for transparent failover, and an easy-to-use blocking API.

* [godoc](https://godoc.org/github.com/optiopay/kafka/v2) generated documentation,
* [code examples](https://godoc.org/github.com/optiopay/kafka/v2#pkg-examples)

## Example

Write all messages from stdin to Kafka and print all messages from a Kafka topic to stdout.

```go
package main

import (
	"bufio"
	"log"
	"os"
	"strings"

	"github.com/optiopay/kafka/v2"
	"github.com/optiopay/kafka/v2/proto"
)

const (
	topic     = "my-messages"
	partition = 0
)

var kafkaAddrs = []string{"localhost:9092", "localhost:9093"}

// printConsumed reads messages from kafka and prints them out
func printConsumed(broker kafka.Client) {
	conf := kafka.NewConsumerConf(topic, partition)
	conf.StartOffset = kafka.StartOffsetNewest

	consumer, err := broker.Consumer(conf)
	if err != nil {
		log.Fatalf("cannot create kafka consumer for %s:%d: %s", topic, partition, err)
	}

	for {
		msg, err := consumer.Consume()
		if err != nil {
			if err != kafka.ErrNoData {
				log.Printf("cannot consume %q topic message: %s", topic, err)
			}
			break
		}
		log.Printf("message %d: %s", msg.Offset, msg.Value)
	}
	log.Print("consumer quit")
}

// produceStdin reads stdin and sends every non-empty line as a message
func produceStdin(broker kafka.Client) {
	producer := broker.Producer(kafka.NewProducerConf())
	input := bufio.NewReader(os.Stdin)
	for {
		line, err := input.ReadString('\n')
		if err != nil {
			log.Fatalf("input error: %s", err)
		}
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		msg := &proto.Message{Value: []byte(line)}
		if _, err := producer.Produce(topic, partition, msg); err != nil {
			log.Fatalf("cannot produce message to %s:%d: %s", topic, partition, err)
		}
	}
}

func main() {
	conf := kafka.NewBrokerConf("test-client")
	conf.AllowTopicCreation = true

	// connect to kafka cluster
	broker, err := kafka.Dial(kafkaAddrs, conf)
	if err != nil {
		log.Fatalf("cannot connect to kafka cluster: %s", err)
	}
	defer broker.Close()

	go printConsumed(broker)
	produceStdin(broker)
}
```
kafka-2.1.1/go.mod000066400000000000000000000000521356004474300137010ustar00rootroot00000000000000module github.com/optiopay/kafka

go 1.13
kafka-2.1.1/testkeys/000077500000000000000000000000001356004474300144515ustar00rootroot00000000000000kafka-2.1.1/testkeys/ca.cnf000066400000000000000000000050371356004474300155310ustar00rootroot00000000000000# we use 'ca' as the default section because we're usign the ca command
Mandatory private_key = ./ca.key # the message digest algorithm. Remember to not use MD5 default_md = sha1 # for how many days will the signed certificate be valid default_days = 3650 # a section with a set of variables corresponding to DN fields policy = my_policy [ my_policy ] # if the value is "match" then the field value must match the same field in the # CA certificate. If the value is "supplied" then it must be present. # Optional means it may be present. Any fields not mentioned are silently # deleted. countryName = match stateOrProvinceName = supplied organizationName = supplied commonName = supplied organizationalUnitName = optional commonName = supplied [ ca ] default_ca = my_ca [ my_ca ] # a text file containing the next serial number to use in hex. Mandatory. # This file must be present and contain a valid serial number. serial = ./serial # the text database file to use. Mandatory. This file must be present though # initially it will be empty. database = ./index.txt # specifies the directory where new certificates will be placed. Mandatory. new_certs_dir = ./newcerts # the file containing the CA certificate. Mandatory certificate = ./ca.crt # the file contaning the CA private key. Mandatory private_key = ./ca.key # the message digest algorithm. Remember to not use MD5 default_md = sha1 # for how many days will the signed certificate be valid default_days = 3650 # a section with a set of variables corresponding to DN fields policy = my_policy [ my_policy ] # if the value is "match" then the field value must match the same field in the # CA certificate. If the value is "supplied" then it must be present. # Optional means it may be present. Any fields not mentioned are silently # deleted. countryName = match stateOrProvinceName = supplied organizationName = supplied commonName = supplied organizationalUnitName = optional commonName = supplied kafka-2.1.1/testkeys/ca.crt000066400000000000000000000024011356004474300155430ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDhTCCAm2gAwIBAgIJAJc7avZoW8KVMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV BAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxpbjERMA8GA1UE CgwIT3B0aW9wYXkxFTATBgNVBAMMDG9wdGlvcGF5LmNvbTAeFw0xODA4MjgwNzA3 NDVaFw0yODA4MjUwNzA3NDVaMFkxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJs aW4xDzANBgNVBAcMBkJlcmxpbjERMA8GA1UECgwIT3B0aW9wYXkxFTATBgNVBAMM DG9wdGlvcGF5LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMSF W9t0k3JlBeVxGVRTowfB/+VsC6NyPLIikOl81LK40Nbo/LdsB2jbj7mpS+Kli3Ij /ySrSKxNLZGZuOZsw21HkoSKpX1Ymm4wJE76J17zHnzoRMrfawKs/344EPf2dKSk u5Gqk16uvyeIULA11UTb+dCdpyklrcFnBMj2Ud+ANi54ZlQrdpejAEBkJYen4wtw tudtavAWtlm83f6Nt+xKL03H9jPJiHngHWSTFfe5l2wMMz9KlEWvA0GbmSKMYt+0 qpGoMWD3tpp7ZIw5rNFeJTtx4ZycFoaJn4NkeSYi2PMwjPsQh1EOmSm2JsKVzUu9 +NwxdPEehJbTzQHJ8z8CAwEAAaNQME4wHQYDVR0OBBYEFGETC+j4RjHOcoCYc4yW UcSGJOmzMB8GA1UdIwQYMBaAFGETC+j4RjHOcoCYc4yWUcSGJOmzMAwGA1UdEwQF MAMBAf8wDQYJKoZIhvcNAQELBQADggEBACORIPYQHYTqhGTG7dkaakDy7ArBtrxR AYwl7PhWmvRA8/PjWtcfm7SgUZsR8bwEG5wMzeu63hmYMm0z7G7KmofXXy2ToeSM k3iUJbn4f8ArFDeB/w0Og46W2e5lHqqXTwPWF4P6LtM6QMkbnXOi1B4wv3jyzO/c qQlhwXjZEHQVJUe2/7CmYStnmPW+FJCW52RFeNZkKZ90p5hg2Tv8dB1VH0NfabCR KwQTLtqRVDPjFnCZgiu+SVBAkXV3IYYLqSEKreNY2R9C2oIJQAnoJGQURv2LjnSp QD64LgYBqyvMrbuJ6PlPOpQQXAjH1SnyDsKkZeuxjK/rwEo2+demY08= -----END CERTIFICATE----- kafka-2.1.1/testkeys/generate_certificates.sh000077500000000000000000000012721356004474300213310ustar00rootroot00000000000000set -o xtrace find . -type f ! -name '*.sh' ! 
-name '*.cnf' -delete rm -rf ./newcerts/ #openssl genrsa -out example.org.key 2048 openssl genrsa -out example.org.key 2048 openssl req -new -key example.org.key -out example.org.csr -subj "/C=DE/ST=Berlin/L=Berlin/O=Optiopay/CN=api.optiopay.com" openssl req -new -out oats.csr -config oats.cnf openssl genrsa -out ca.key 2048 openssl req -new -x509 -days 3650 -key ca.key -out ca.crt -subj "/C=DE/ST=Berlin/L=Berlin/O=Optiopay/CN=optiopay.com" mkdir newcerts touch index.txt echo '01' > serial openssl ca -config ca.cnf -out example.org.crt -infiles example.org.csr openssl ca -config ca.cnf -out oats.crt -extfile oats.extensions.cnf -in oats.csr kafka-2.1.1/testkeys/oats.cnf000066400000000000000000000042051356004474300161100ustar00rootroot00000000000000# The main section is named req because the command we are using is req # (openssl req ...) [ req ] # This specifies the default key size in bits. If not specified then 512 is # used. It is used if the -new option is used. It can be overridden by using # the -newkey option. default_bits = 2048 # This is the default filename to write a private key to. If not specified the # key is written to standard output. This can be overridden by the -keyout # option. default_keyfile = oats.key # If this is set to no then if a private key is generated it is not encrypted. # This is equivalent to the -nodes command line option. For compatibility # encrypt_rsa_key is an equivalent option. encrypt_key = no # This option specifies the digest algorithm to use. Possible values include # md5 sha1 mdc2. If not present then MD5 is used. This option can be overridden # on the command line. default_md = sha1 # if set to the value no this disables prompting of certificate fields and just # takes values from the config file directly. It also changes the expected # format of the distinguished_name and attributes sections. prompt = no # if set to the value yes then field values to be interpreted as UTF8 strings, # by default they are interpreted as ASCII. This means that the field values, # whether prompted from a terminal or obtained from a configuration file, must # be valid UTF8 strings. utf8 = yes # This specifies the section containing the distinguished name fields to # prompt for when generating a certificate or certificate request. distinguished_name = my_req_distinguished_name # this specifies the configuration file section containing a list of extensions # to add to the certificate request. It can be overridden by the -reqexts # command line switch. See the x509v3_config(5) manual page for details of the # extension section format. 
req_extensions = my_extensions [ my_req_distinguished_name ] C = DE ST = Berlin L = Berlin O = Optiopay CN = optiopay.com [ my_extensions ] basicConstraints=CA:FALSE subjectAltName=@my_subject_alt_names subjectKeyIdentifier = hash [ my_subject_alt_names ] DNS.1 = localhost DNS.2 = 0.0.0.0 IP.1 = 0.0.0.0 IP.2 = 127.0.0.1 kafka-2.1.1/testkeys/oats.crt000066400000000000000000000102241356004474300161300ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 2 (0x2) Signature Algorithm: sha1WithRSAEncryption Issuer: C=DE, ST=Berlin, L=Berlin, O=Optiopay, CN=optiopay.com Validity Not Before: Aug 28 07:07:47 2018 GMT Not After : Aug 25 07:07:47 2028 GMT Subject: C=DE, ST=Berlin, O=Optiopay, CN=optiopay.com Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: 00:cd:c1:7f:06:42:90:7c:a3:dc:65:02:4d:83:b1: ee:64:b1:69:0b:73:63:ca:79:4c:7b:e2:33:10:c9: 4a:41:83:6d:a4:6f:77:42:ab:00:55:6e:b7:64:80: 93:a1:2f:58:3c:06:9a:e4:83:38:55:dd:60:fd:ef: a2:c5:0f:2f:b7:8c:c5:1a:95:fa:d9:ea:ec:cd:94: ae:af:cb:ea:2b:53:dd:ab:1f:91:ec:83:4a:8c:ed: 18:f3:25:a9:3e:15:63:8f:9f:92:9f:53:d2:8a:91: 42:bd:ad:fa:07:56:86:80:59:b3:23:96:ce:d7:36: 8f:4d:6b:7f:3f:9b:0e:bf:ea:18:38:71:73:63:d3: d8:01:fc:b1:f4:85:a7:7a:b2:13:2a:67:e7:ab:17: 72:97:a6:79:0f:1b:81:98:83:20:22:56:a0:44:c9: bc:17:01:6c:2b:63:e2:55:bd:50:98:d1:1d:0d:c6: 92:0f:a1:a3:60:63:fc:82:94:3f:a8:e7:4f:a5:87: f2:82:86:8d:df:0c:04:38:b7:a3:0a:e4:87:f4:6f: cb:48:b6:cb:18:0c:f4:16:d5:b7:b4:cb:90:0e:ff: af:62:85:94:03:ba:71:bb:4e:17:59:60:23:12:cd: 2c:9b:b1:21:6d:d5:10:2d:b9:04:52:a3:ef:67:aa: af:3b Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE X509v3 Subject Alternative Name: DNS:localhost, DNS:0.0.0.0, IP Address:127.0.0.1 X509v3 Subject Key Identifier: EF:40:EB:09:B2:FE:54:99:D0:F2:C0:FF:8D:F3:E6:C4:98:57:C5:13 Signature Algorithm: sha1WithRSAEncryption 11:71:a8:3c:2b:56:c7:ee:54:13:9c:ba:4b:d9:b9:db:07:48: e8:d6:be:0f:11:78:e2:bc:b8:10:66:6f:d8:26:55:cb:d6:b3: 88:15:3d:a9:cd:63:4a:e6:e6:97:72:15:2e:f1:d2:5b:72:22: 54:b3:4d:9b:7f:b6:56:24:df:c9:e9:bd:47:82:26:f1:bc:5e: 58:da:6b:11:66:e4:d6:9e:65:b6:ae:dc:13:94:b8:00:49:b3: 01:f3:7c:fb:5b:03:e7:ac:d4:df:ea:e9:18:e5:f6:6f:c0:5d: 9f:81:fa:cf:ea:5f:94:02:87:67:c9:68:53:f1:7e:e4:33:68: 70:01:e0:46:a7:bb:65:13:12:63:64:ab:91:52:b6:7f:ab:9e: f4:48:41:4e:6d:44:6c:22:7c:28:9c:67:90:31:1d:86:29:b0: f5:be:cd:63:c0:b4:fd:68:88:88:97:f1:cf:c4:60:37:d6:e8: bd:6f:bb:0a:68:98:7c:64:7d:09:ef:a0:db:91:2d:a2:80:31: ff:3c:8a:3c:2f:33:39:a1:83:c2:a5:cc:1c:9d:42:ce:1a:76: a2:e3:34:c5:c9:61:90:6b:07:0d:b2:40:14:ea:50:1d:71:3a: 6d:ed:d2:ac:ca:55:2d:ba:f8:65:d4:0c:84:d0:dc:1e:53:0c: f3:00:0f:76 -----BEGIN CERTIFICATE----- MIIDbTCCAlWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBZMQswCQYDVQQGEwJERTEP MA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xETAPBgNVBAoMCE9wdGlv cGF5MRUwEwYDVQQDDAxvcHRpb3BheS5jb20wHhcNMTgwODI4MDcwNzQ3WhcNMjgw ODI1MDcwNzQ3WjBIMQswCQYDVQQGEwJERTEPMA0GA1UECAwGQmVybGluMREwDwYD VQQKDAhPcHRpb3BheTEVMBMGA1UEAwwMb3B0aW9wYXkuY29tMIIBIjANBgkqhkiG 9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzcF/BkKQfKPcZQJNg7HuZLFpC3NjynlMe+Iz EMlKQYNtpG93QqsAVW63ZICToS9YPAaa5IM4Vd1g/e+ixQ8vt4zFGpX62erszZSu r8vqK1Pdqx+R7INKjO0Y8yWpPhVjj5+Sn1PSipFCva36B1aGgFmzI5bO1zaPTWt/ P5sOv+oYOHFzY9PYAfyx9IWnerITKmfnqxdyl6Z5DxuBmIMgIlagRMm8FwFsK2Pi Vb1QmNEdDcaSD6GjYGP8gpQ/qOdPpYfygoaN3wwEOLejCuSH9G/LSLbLGAz0FtW3 tMuQDv+vYoWUA7pxu04XWWAjEs0sm7EhbdUQLbkEUqPvZ6qvOwIDAQABo1EwTzAJ BgNVHRMEAjAAMCMGA1UdEQQcMBqCCWxvY2FsaG9zdIIHMC4wLjAuMIcEfwAAATAd 
BgNVHQ4EFgQU70DrCbL+VJnQ8sD/jfPmxJhXxRMwDQYJKoZIhvcNAQEFBQADggEB ABFxqDwrVsfuVBOcukvZudsHSOjWvg8ReOK8uBBmb9gmVcvWs4gVPanNY0rm5pdy FS7x0ltyIlSzTZt/tlYk38npvUeCJvG8XljaaxFm5NaeZbau3BOUuABJswHzfPtb A+es1N/q6Rjl9m/AXZ+B+s/qX5QCh2fJaFPxfuQzaHAB4Eanu2UTEmNkq5FStn+r nvRIQU5tRGwifCicZ5AxHYYpsPW+zWPAtP1oiIiX8c/EYDfW6L1vuwpomHxkfQnv oNuRLaKAMf88ijwvMzmhg8KlzBydQs4adqLjNMXJYZBrBw2yQBTqUB1xOm3t0qzK VS26+GXUDITQ3B5TDPMAD3Y= -----END CERTIFICATE----- kafka-2.1.1/testkeys/oats.extensions.cnf000066400000000000000000000002461356004474300203070ustar00rootroot00000000000000basicConstraints=CA:FALSE subjectAltName=@my_subject_alt_names subjectKeyIdentifier = hash [ my_subject_alt_names ] DNS.1 = localhost DNS.2 = 0.0.0.0 IP = 127.0.0.1 kafka-2.1.1/testkeys/oats.key000066400000000000000000000032541356004474300161350ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDNwX8GQpB8o9xl Ak2Dse5ksWkLc2PKeUx74jMQyUpBg22kb3dCqwBVbrdkgJOhL1g8BprkgzhV3WD9 76LFDy+3jMUalfrZ6uzNlK6vy+orU92rH5Hsg0qM7RjzJak+FWOPn5KfU9KKkUK9 rfoHVoaAWbMjls7XNo9Na38/mw6/6hg4cXNj09gB/LH0had6shMqZ+erF3KXpnkP G4GYgyAiVqBEybwXAWwrY+JVvVCY0R0NxpIPoaNgY/yClD+o50+lh/KCho3fDAQ4 t6MK5If0b8tItssYDPQW1be0y5AO/69ihZQDunG7ThdZYCMSzSybsSFt1RAtuQRS o+9nqq87AgMBAAECggEBAKZFF0EXyWrvf8qgccNapir4sspuFNYp74Ss5ldKUpde fdvDt7xIM5zWO50byjEW8VaQf9rprzy0CQ0r+X0FKKpLN3ykdnJjRblbzU6Cmlkp DJicim92KWh4sRDL/lgNMMd/TB00v476k9R0IviLE2XlFr5LwBpqHKOTmuDrjJ/I N7DeeVrVeFwX4JgDXR6CvITZhRUbRmSNlZhqh8ja4MOnWO9+WUxk3Vezejk+6GOF PX3ubmfylDw2c8KGfzPoXitOFxhexw8eNX1t81IdT0ZJbjrdDEaHIkzGxjOEQemM iVL0k1ta7f8zakqH2v+CgbfqtYiH+nnUIFx5BhVIqQECgYEA/g/422JpXmco2pyr FNTh8bY7gynigB+eSU19ntmHxpN5CjuXb+sQ+++JcTihiwLL3fmXSGBYc2axKpQJ 1xAOECe25iTAeE98mIi9K1L9rx5JAxCK9oU6OQEkQ8Gum8Vp/8won0tJRQOMZL6F bAtZcyeAeRglZhI64hWY73/c+T8CgYEAz1M2B9+fl9Z04cjoZGN0bYVkL2C4wjVr /DfLNPGM3B4QB3Zx1GUuQ7OmsO+qOgzn2g4/x30CXHPYo97KcFu2W2TaeOwhgScf t6hh3pUEihXqguaZT68JSoi7uZWiV4bshsDpOQNk6NcmgUVhue63AWiHXpzaMA3P Xss6eZJY7wUCgYEAwg5dHOUcapkNIX4NRwob0s5IwrTZZte/xWnFC/JHvSujhmdi EoBQJr8eEjcZXN6z7UbZgNTmeq7zn/wXUAfZVgmfsEZeJQdBuhintVHcp0fQqOCX 5Bh7ElOG6aIkxqKRhmrglFujCW3ebI0ByDcbCdQgU15YIpDGHS419KmLlW0CgYBx TAHMz7HBIhsBf+8vXQI1D54uHecZimQWks7jgdT+PRd5XIrcDOb5bGV9b5tQ2dih 2wOTmob9yvjqpEdwm03OKBET38Z3c8/rZ4MERd8TLuLW4fmKCBdFue8Mfe8Xcc4z 3ZwV4fr1Gw4ZurarCv7LMDo5w/GgLQvTu6+a3BTpmQKBgQDlr2hwcWcdYFchUkU5 lT5XmTWy54XBbSsXOiNlMPDA2nwGvLv1o9PI2ip7PPOyi8I3cguK0OwgYJxxbpHg 4bibotihARMx9ce4d9i2qtInknnYxb4XBWP5P2l0z4sfPeeJRt7j/YvnuqmAmi3z bA8cFdmO7l0aLfnY5Y10+4Gw9A== -----END PRIVATE KEY----- kafka-2.1.1/v2/000077500000000000000000000000001356004474300131255ustar00rootroot00000000000000kafka-2.1.1/v2/broker.go000066400000000000000000001254101356004474300147430ustar00rootroot00000000000000package kafka import ( "errors" "fmt" "io" "math/rand" "net" "sync" "syscall" "time" "github.com/optiopay/kafka/v2/proto" ) const ( // StartOffsetNewest configures the consumer to fetch messages produced // after creating the consumer. StartOffsetNewest = -1 // StartOffsetOldest configures the consumer to fetch starting from the // oldest message available. StartOffsetOldest = -2 ) var ( // ErrNoData is returned by consumers on Fetch when the retry limit is set // and exceeded. ErrNoData = errors.New("no data") // Make sure interfaces are implemented _ Client = &Broker{} _ Consumer = &consumer{} _ Producer = &producer{} _ OffsetCoordinator = &offsetCoordinator{} ) // Client is the interface implemented by Broker. 
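//
// A minimal usage sketch (the address, client ID, topic and partition below
// are only placeholders, not values required by the library):
//
//	broker, err := kafka.Dial([]string{"localhost:9092"}, kafka.NewBrokerConf("example-client"))
//	if err != nil {
//		// handle connection error
//	}
//	defer broker.Close()
//
//	producer := broker.Producer(kafka.NewProducerConf())
//	consumer, err := broker.Consumer(kafka.NewConsumerConf("example-topic", 0))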
type Client interface { Producer(conf ProducerConf) Producer Consumer(conf ConsumerConf) (Consumer, error) OffsetCoordinator(conf OffsetCoordinatorConf) (OffsetCoordinator, error) OffsetEarliest(topic string, partition int32) (offset int64, err error) OffsetLatest(topic string, partition int32) (offset int64, err error) Close() } // Consumer is the interface that wraps the Consume method. // // Consume reads a message from a consumer, returning an error when // encountered. type Consumer interface { Consume() (*proto.Message, error) } // BatchConsumer is the interface that wraps the ConsumeBatch method. // // ConsumeBatch reads a batch of messages from a consumer, returning an error // when encountered. type BatchConsumer interface { ConsumeBatch() ([]*proto.Message, error) } // Producer is the interface that wraps the Produce method. // // Produce writes the messages to the given topic and partition. // It returns the offset of the first message and any error encountered. // The offset of each message is also updated accordingly. type Producer interface { Produce(topic string, partition int32, messages ...*proto.Message) (offset int64, err error) } // OffsetCoordinator is the interface which wraps the Commit and Offset methods. type OffsetCoordinator interface { Commit(topic string, partition int32, offset int64) error Offset(topic string, partition int32) (offset int64, metadata string, err error) } type topicPartition struct { topic string partition int32 } func (tp topicPartition) String() string { return fmt.Sprintf("%s:%d", tp.topic, tp.partition) } type clusterMetadata struct { created time.Time nodes map[int32]string // node ID to address endpoints map[topicPartition]int32 // partition to leader node ID partitions map[string]int32 // topic to number of partitions controllerId int32 // ID node which run cluster controller } // BrokerConf represents the configuration of a broker. type BrokerConf struct { // Kafka client ID. ClientID string // LeaderRetryLimit limits the number of connection attempts to a single // node before failing. Use LeaderRetryWait to control the wait time // between retries. // // Defaults to 10. LeaderRetryLimit int // LeaderRetryWait sets a limit to the waiting time when trying to connect // to a single node after failure. // // Defaults to 500ms. // // Timeout on a connection is controlled by the DialTimeout setting. LeaderRetryWait time.Duration // AllowTopicCreation enables a last-ditch "send produce request" which // happens if we do not know about a topic. This enables topic creation // if your Kafka cluster is configured to allow it. // // Defaults to False. AllowTopicCreation bool // Any new connection dial timeout. // // Default is 10 seconds. DialTimeout time.Duration // DialRetryLimit limits the number of connection attempts to every node in // cluster before failing. Use DialRetryWait to control the wait time // between retries. // // Defaults to 10. DialRetryLimit int // DialRetryWait sets a limit to the waiting time when trying to establish // broker connection to single node to fetch cluster metadata. // // Defaults to 500ms. DialRetryWait time.Duration // ReadTimeout is TCP read timeout // // Default is 30 seconds ReadTimeout time.Duration // RetryErrLimit limits the number of retry attempts when an error is // encountered. // // Default is 10. RetryErrLimit int // RetryErrWait controls the wait duration between retries after failed // fetch request. // // Default is 500ms. 
RetryErrWait time.Duration // DEPRECATED 2015-07-10 - use Logger instead // // TODO(husio) remove // // Logger used by the broker. Log interface { Print(...interface{}) Printf(string, ...interface{}) } // Logger is general logging interface that can be provided by popular // logging frameworks. Used to notify and as replacement for stdlib `log` // package. Logger Logger //Settings for TLS encryption. //You need to set all these parameters to enable TLS //TLS CA pem TLSCa []byte //TLS certificate TLSCert []byte //TLS key TLSKey []byte } func (conf *BrokerConf) useTLS() bool { return (len(conf.TLSCa) > 0 && len(conf.TLSKey) > 0 && len(conf.TLSCert) > 0) } // NewBrokerConf returns the default broker configuration. func NewBrokerConf(clientID string) BrokerConf { return BrokerConf{ ClientID: clientID, DialTimeout: 10 * time.Second, DialRetryLimit: 10, DialRetryWait: 500 * time.Millisecond, AllowTopicCreation: false, LeaderRetryLimit: 10, LeaderRetryWait: 500 * time.Millisecond, RetryErrLimit: 10, RetryErrWait: time.Millisecond * 500, ReadTimeout: 30 * time.Second, Logger: &nullLogger{}, } } // Broker is an abstract connection to kafka cluster, managing connections to // all kafka nodes. type Broker struct { conf BrokerConf mu sync.Mutex metadata clusterMetadata conns map[int32]*connection nodeAddresses []string } // Dial connects to any node from a given list of kafka addresses and after // successful metadata fetch, returns broker. // // The returned broker is not initially connected to any kafka node. func Dial(nodeAddresses []string, conf BrokerConf) (*Broker, error) { if len(nodeAddresses) == 0 { return nil, errors.New("no addresses provided") } broker := &Broker{ conf: conf, conns: make(map[int32]*connection), nodeAddresses: nodeAddresses, } for i := 0; i < conf.DialRetryLimit; i++ { if i > 0 { conf.Logger.Debug("cannot fetch metadata from any connection", "retry", i, "sleep", conf.DialRetryWait) time.Sleep(conf.DialRetryWait) } err := broker.refreshMetadata() if err == nil { return broker, nil } conf.Logger.Error("Got an error trying to fetch metadata", "error", err) } return nil, fmt.Errorf("cannot connect to: %s. TLS authentication: %t", nodeAddresses, conf.useTLS()) } func (b *Broker) getInitialAddresses() []string { dest := make([]string, len(b.nodeAddresses)) perm := rand.Perm(len(b.nodeAddresses)) for i, v := range perm { dest[v] = b.nodeAddresses[i] } return dest } // Close closes the broker and all active kafka nodes connections. func (b *Broker) Close() { b.mu.Lock() defer b.mu.Unlock() for nodeID, conn := range b.conns { if err := conn.Close(); err != nil { b.conf.Logger.Info("cannot close node connection", "nodeID", nodeID, "error", err) } } } // Metadata requests metadata information from any node. func (b *Broker) Metadata() (*proto.MetadataResp, error) { b.mu.Lock() defer b.mu.Unlock() return b.fetchMetadata() } // CreateTopic request topic creation func (b *Broker) CreateTopic(topics []proto.TopicInfo, timeout time.Duration, validateOnly bool) (*proto.CreateTopicsResp, error) { b.mu.Lock() defer b.mu.Unlock() var resp *proto.CreateTopicsResp err := b.callOnClusterController(func(c *connection) error { var err error req := proto.CreateTopicsReq{ Timeout: timeout, CreateTopicsRequests: topics, ValidateOnly: validateOnly, } resp, err = c.CreateTopic(&req) return err }) return resp, err } // refreshMetadata is requesting metadata information from any node and refresh // internal cached representation. 
// Because it's changing internal state, this method requires lock protection, // but it does not acquire nor release lock itself. func (b *Broker) refreshMetadata() error { meta, err := b.fetchMetadata() if err == nil { b.cacheMetadata(meta) } return err } // muRefreshMetadata calls refreshMetadata, but protects it with broker's lock. func (b *Broker) muRefreshMetadata() error { b.mu.Lock() err := b.refreshMetadata() b.mu.Unlock() return err } func (b *Broker) callOnClusterController(f func(c *connection) error) error { checkednodes := make(map[int32]bool) var err error var conn *connection controllerID := b.metadata.controllerId // try all existing connections first for nodeID, conn := range b.conns { checkednodes[nodeID] = true if nodeID == controllerID { err = f(conn) if err != nil { continue } return nil } } // try all nodes that we know of that we're not connected to for nodeID, addr := range b.metadata.nodes { if nodeID == controllerID { conn, err = b.getConnection(addr) if err != nil { return err } err = f(conn) // we had no active connection to this node, so most likely we don't need it _ = conn.Close() return err } } if err != nil { return err } else { return errors.New("Cannot get connection to controller node") } } func (b *Broker) callOnActiveConnection(f func(c *connection) error) error { checkednodes := make(map[int32]bool) var err error var conn *connection // try all existing connections first for nodeID, conn := range b.conns { checkednodes[nodeID] = true err = f(conn) if err != nil { continue } return nil } // try all nodes that we know of that we're not connected to for nodeID, addr := range b.metadata.nodes { if _, ok := checkednodes[nodeID]; ok { continue } conn, err = b.getConnection(addr) if err != nil { continue } err = f(conn) // we had no active connection to this node, so most likely we don't need it _ = conn.Close() if err != nil { continue } return nil } for _, addr := range b.getInitialAddresses() { conn, err = b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect to seed node", "address", addr, "error", err) continue } err = f(conn) _ = conn.Close() if err == nil { return nil } } return err } // fetchMetadata is requesting metadata information from any node and return // protocol response if successful // // If "topics" are specified, only fetch metadata for those topics (can be // used to create a topic) // // Because it's using metadata information to find node connections it's not // thread safe and using it require locking. 
func (b *Broker) fetchMetadata(topics ...string) (*proto.MetadataResp, error) { checkednodes := make(map[int32]bool) // try all existing connections first for nodeID, conn := range b.conns { checkednodes[nodeID] = true resp, err := conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, Topics: topics, }) if err != nil { b.conf.Logger.Debug("cannot fetch metadata from node", "nodeID", nodeID, "error", err) continue } return resp, nil } // try all nodes that we know of that we're not connected to for nodeID, addr := range b.metadata.nodes { if _, ok := checkednodes[nodeID]; ok { continue } conn, err := b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect", "address", addr, "error", err) continue } resp, err := conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, Topics: topics, }) // we had no active connection to this node, so most likely we don't need it _ = conn.Close() if err != nil { b.conf.Logger.Debug("cannot fetch metadata from node", "nodeID", nodeID, "error", err) continue } return resp, nil } for _, addr := range b.getInitialAddresses() { conn, err := b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect to seed node", "address", addr, "error", err) continue } resp, err := conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, Topics: topics, }) _ = conn.Close() if err == nil { return resp, nil } b.conf.Logger.Debug("cannot fetch metadata", "address", addr, "error", err) } return nil, errors.New("cannot fetch metadata. No topics created?") } // cacheMetadata creates new internal metadata representation using data from // given response. It's call has to be protected with lock. // // Do not call with partial metadata response, this assumes we have the full // set of metadata in the response func (b *Broker) cacheMetadata(resp *proto.MetadataResp) { if !b.metadata.created.IsZero() { b.conf.Logger.Debug("rewriting old metadata", "age", time.Now().Sub(b.metadata.created)) } b.metadata = clusterMetadata{ created: time.Now(), nodes: make(map[int32]string), endpoints: make(map[topicPartition]int32), partitions: make(map[string]int32), } b.metadata.controllerId = resp.ControllerID for _, node := range resp.Brokers { addr := fmt.Sprintf("%s:%d", node.Host, node.Port) b.metadata.nodes[node.NodeID] = addr } for _, topic := range resp.Topics { for _, part := range topic.Partitions { dest := topicPartition{topic.Name, part.ID} b.metadata.endpoints[dest] = part.Leader } b.metadata.partitions[topic.Name] = int32(len(topic.Partitions)) } b.conf.Logger.Debug("new metadata cached") } // PartitionCount returns how many partitions a given topic has. If a topic // is not known, 0 and an error are returned. func (b *Broker) PartitionCount(topic string) (int32, error) { b.mu.Lock() defer b.mu.Unlock() count, ok := b.metadata.partitions[topic] if ok { return count, nil } return 0, fmt.Errorf("topic %s not found in metadata", topic) } // muLeaderConnection returns connection to leader for given partition. If // connection does not exist, broker will try to connect first and add store // connection for any further use. // // Failed connection retry is controlled by broker configuration. // // If broker is configured to allow topic creation, then if we don't find // the leader we will return a random broker. The broker will error if we end // up producing to it incorrectly (i.e., our metadata happened to be out of // date). 
func (b *Broker) muLeaderConnection(topic string, partition int32) (conn *connection, err error) { tp := topicPartition{topic, partition} b.mu.Lock() defer b.mu.Unlock() for retry := 0; retry < b.conf.LeaderRetryLimit; retry++ { if retry != 0 { b.mu.Unlock() b.conf.Logger.Debug("cannot get leader connection", "topic", topic, "partition", partition, "retry", retry, "sleep", b.conf.LeaderRetryWait.String()) time.Sleep(b.conf.LeaderRetryWait) b.mu.Lock() } nodeID, ok := b.metadata.endpoints[tp] if !ok { err = b.refreshMetadata() if err != nil { b.conf.Logger.Info("cannot get leader connection: cannot refresh metadata", "error", err) continue } nodeID, ok = b.metadata.endpoints[tp] if !ok { err = proto.ErrUnknownTopicOrPartition // If we allow topic creation, now is the point where it is likely that this // is a brand new topic, so try to get metadata on it (which will trigger // the creation process) if b.conf.AllowTopicCreation { _, err := b.fetchMetadata(topic) if err != nil { b.conf.Logger.Info("failed to fetch metadata for new topic", "topic", topic, "error", err) } } else { b.conf.Logger.Info("cannot get leader connection: unknown topic or partition", "topic", topic, "partition", partition, "endpoint", tp) } continue } } conn, ok = b.conns[nodeID] if !ok { addr, ok := b.metadata.nodes[nodeID] if !ok { b.conf.Logger.Info("cannot get leader connection: no information about node", "nodeID", nodeID) err = proto.ErrBrokerNotAvailable delete(b.metadata.endpoints, tp) continue } conn, err = b.getConnection(addr) if err != nil { b.conf.Logger.Info("cannot get leader connection: cannot connect to node", "address", addr, "error", err) delete(b.metadata.endpoints, tp) continue } b.conns[nodeID] = conn } return conn, nil } return nil, err } func (b *Broker) getConnection(addr string) (*connection, error) { var c *connection var err error if b.conf.useTLS() { c, err = newTLSConnection(addr, b.conf.TLSCa, b.conf.TLSCert, b.conf.TLSKey, b.conf.DialTimeout, b.conf.ReadTimeout) } else { c, err = newTCPConnection(addr, b.conf.DialTimeout, b.conf.ReadTimeout) } if err != nil { return nil, err } return c, nil } // coordinatorConnection returns connection to offset coordinator for given group. // // Failed connection retry is controlled by broker configuration. 
func (b *Broker) muCoordinatorConnection(consumerGroup string) (conn *connection, resErr error) { b.mu.Lock() defer b.mu.Unlock() for retry := 0; retry < b.conf.LeaderRetryLimit; retry++ { if retry != 0 { b.mu.Unlock() time.Sleep(b.conf.LeaderRetryWait) b.mu.Lock() } // first try all already existing connections for _, conn := range b.conns { resp, err := conn.ConsumerMetadata(&proto.ConsumerMetadataReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, ConsumerGroup: consumerGroup, }) if err != nil { b.conf.Logger.Debug("cannot fetch coordinator metadata", "consumGrp", consumerGroup, "error", err) resErr = err continue } if resp.Err != nil { b.conf.Logger.Debug("coordinator metadata response error", "consumGrp", consumerGroup, "error", resp.Err) resErr = err continue } addr := fmt.Sprintf("%s:%d", resp.CoordinatorHost, resp.CoordinatorPort) conn, err := b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect to node", "coordinatorID", resp.CoordinatorID, "address", addr, "error", err) resErr = err continue } b.conns[resp.CoordinatorID] = conn return conn, nil } // if none of the connections worked out, try with fresh data if err := b.refreshMetadata(); err != nil { b.conf.Logger.Debug("cannot refresh metadata", "error", err) resErr = err continue } for nodeID, addr := range b.metadata.nodes { if _, ok := b.conns[nodeID]; ok { // connection to node is cached so it was already checked continue } conn, err := b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect to node", "nodeID", nodeID, "address", addr, "error", err) resErr = err continue } b.conns[nodeID] = conn resp, err := conn.ConsumerMetadata(&proto.ConsumerMetadataReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, ConsumerGroup: consumerGroup, }) if err != nil { b.conf.Logger.Debug("cannot fetch metadata", "consumGrp", consumerGroup, "error", err) resErr = err continue } if resp.Err != nil { b.conf.Logger.Debug("metadata response error", "consumGrp", consumerGroup, "error", resp.Err) resErr = err continue } addr := fmt.Sprintf("%s:%d", resp.CoordinatorHost, resp.CoordinatorPort) conn, err = b.getConnection(addr) if err != nil { b.conf.Logger.Debug("cannot connect to node", "coordinatorID", resp.CoordinatorID, "address", addr, "error", err) resErr = err continue } b.conns[resp.CoordinatorID] = conn return conn, nil } resErr = proto.ErrNoCoordinator } return nil, resErr } // muCloseDeadConnection is closing and removing any reference to given // connection. Because we remove dead connection, additional request to refresh // metadata is made // // muCloseDeadConnection call it protected with broker's lock. func (b *Broker) muCloseDeadConnection(conn *connection) { b.mu.Lock() defer b.mu.Unlock() for nid, c := range b.conns { if c == conn { b.conf.Logger.Debug("closing dead connection", "nodeID", nid) delete(b.conns, nid) _ = c.Close() if err := b.refreshMetadata(); err != nil { b.conf.Logger.Debug("cannot refresh metadata", "error", err) } return } } } // offset will return offset value for given partition. Use timems to specify // which offset value should be returned. 
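//
// In practice only two sentinel values are passed as timems: -2 requests the
// earliest available offset and -1 the offset of the next message to be
// produced (see OffsetEarliest and OffsetLatest below).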
func (b *Broker) offset(topic string, partition int32, timems int64) (offset int64, err error) { for retry := 0; retry < b.conf.RetryErrLimit; retry++ { if retry != 0 { time.Sleep(b.conf.RetryErrWait) err = b.muRefreshMetadata() if err != nil { continue } } var conn *connection conn, err = b.muLeaderConnection(topic, partition) if err != nil { return 0, err } var resp *proto.OffsetResp resp, err = conn.Offset(&proto.OffsetReq{ RequestHeader: proto.RequestHeader{ClientID: b.conf.ClientID}, ReplicaID: -1, // any client Topics: []proto.OffsetReqTopic{ { Name: topic, Partitions: []proto.OffsetReqPartition{ { ID: partition, TimeMs: timems, MaxOffsets: 2, }, }, }, }, }) if err != nil { if _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE { // Connection is broken, so should be closed, but the error is // still valid and should be returned so that retry mechanism have // chance to react. b.conf.Logger.Debug("connection died while sending message", "topic", topic, "partition", partition, "error", err) b.muCloseDeadConnection(conn) } continue } for _, t := range resp.Topics { if t.Name != topic { b.conf.Logger.Debug("unexpected topic information", "expected", topic, "got", t.Name) continue } for _, part := range t.Partitions { if part.ID != partition { b.conf.Logger.Debug("unexpected partition information", "topic", t.Name, "expected", partition, "got", part.ID) continue } if err = part.Err; err == nil { if len(part.Offsets) == 0 { return 0, nil } else { return part.Offsets[0], nil } } } } } return 0, errors.New("incomplete fetch response") } // OffsetEarliest returns the oldest offset available on the given partition. func (b *Broker) OffsetEarliest(topic string, partition int32) (offset int64, err error) { return b.offset(topic, partition, -2) } // OffsetLatest return the offset of the next message produced in given partition func (b *Broker) OffsetLatest(topic string, partition int32) (offset int64, err error) { return b.offset(topic, partition, -1) } // ProducerConf represents the configuration of a producer. type ProducerConf struct { // Compression method to use, defaulting to proto.CompressionNone. Compression proto.Compression // Message ACK configuration. Use proto.RequiredAcksAll to require all // servers to write, proto.RequiredAcksLocal to wait only for leader node // answer or proto.RequiredAcksNone to not wait for any response. // Setting this to any other, greater than zero value will make producer to // wait for given number of servers to confirm write before returning. RequiredAcks int16 // Timeout of single produce request. By default, 5 seconds. RequestTimeout time.Duration // RetryLimit specify how many times message producing should be retried in // case of failure, before returning the error to the caller. By default // set to 10. RetryLimit int // RetryWait specify wait duration before produce retry after failure. By // default set to 200ms. RetryWait time.Duration // Logger used by producer. By default, reuse logger assigned to broker. Logger Logger } // NewProducerConf returns a default producer configuration. func NewProducerConf() ProducerConf { return ProducerConf{ Compression: proto.CompressionNone, RequiredAcks: proto.RequiredAcksAll, RequestTimeout: 5 * time.Second, RetryLimit: 10, RetryWait: 200 * time.Millisecond, Logger: nil, } } // producer is the link to the client with extra configuration. type producer struct { conf ProducerConf broker *Broker } // Producer returns new producer instance, bound to the broker. 
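//
// A short sketch of producing a single message (topic name and partition are
// placeholders):
//
//	producer := broker.Producer(kafka.NewProducerConf())
//	msg := &proto.Message{Value: []byte("hello")}
//	offset, err := producer.Produce("example-topic", 0, msg)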
func (b *Broker) Producer(conf ProducerConf) Producer { if conf.Logger == nil { conf.Logger = b.conf.Logger } return &producer{ conf: conf, broker: b, } } // Produce writes messages to the given destination. Writes within the call are // atomic, meaning either all or none of them are written to kafka. Produce // has a configurable amount of retries which may be attempted when common // errors are encountered. This behaviour can be configured with the // RetryLimit and RetryWait attributes. // // Upon a successful call, the message's Offset field is updated. func (p *producer) Produce(topic string, partition int32, messages ...*proto.Message) (offset int64, err error) { if len(messages) == 0 { // Newer versions of kafka get upset if we send 0 messages. // Previously, it would return the latest offset, which we // can no longer easily get here. return 0, nil } for retry := 0; retry < p.conf.RetryLimit; retry++ { if retry != 0 { time.Sleep(p.conf.RetryWait) } offset, err = p.produce(topic, partition, messages...) switch err { case nil: for i, msg := range messages { msg.Offset = int64(i) + offset } if retry != 0 { p.conf.Logger.Debug("Produced message after retry", "retry", retry, "topic", topic) } return offset, err case io.EOF, syscall.EPIPE: // p.produce call is closing connection when this error shows up, // but it's also returning it so that retry loop can count this // case // we cannot handle this error here, because there is no direct // access to connection default: if err := p.broker.muRefreshMetadata(); err != nil { p.conf.Logger.Debug("cannot refresh metadata", "error", err) } } p.conf.Logger.Debug("Cannot produce messages", "retry", retry, "topic", topic, "error", err) } p.conf.Logger.Debug("Abort to produce after retrying messages", "retry", p.conf.RetryLimit, "topic", topic) return 0, err } // produce send produce request to leader for given destination. func (p *producer) produce(topic string, partition int32, messages ...*proto.Message) (offset int64, err error) { conn, err := p.broker.muLeaderConnection(topic, partition) if err != nil { return 0, err } req := proto.ProduceReq{ RequestHeader: proto.RequestHeader{ClientID: p.broker.conf.ClientID}, Compression: p.conf.Compression, RequiredAcks: p.conf.RequiredAcks, Timeout: p.conf.RequestTimeout, Topics: []proto.ProduceReqTopic{ { Name: topic, Partitions: []proto.ProduceReqPartition{ { ID: partition, Messages: messages, }, }, }, }, } resp, err := conn.Produce(&req) if err != nil { if _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE { // Connection is broken, so should be closed, but the error is // still valid and should be returned so that retry mechanism have // chance to react. 
p.conf.Logger.Debug("connection died while sending message", "topic", topic, "partition", partition, "error", err) p.broker.muCloseDeadConnection(conn) } return 0, err } if req.RequiredAcks == proto.RequiredAcksNone { return 0, err } // we expect single partition response found := false for _, t := range resp.Topics { if t.Name != topic { p.conf.Logger.Debug("unexpected topic information received", "expected", topic, "got", t.Name) continue } for _, part := range t.Partitions { if part.ID != partition { p.conf.Logger.Debug("unexpected partition information received", "topic", t.Name, "expected", partition, "got", part.ID) continue } found = true offset = part.Offset err = part.Err } } if !found { return 0, errors.New("incomplete produce response") } return offset, err } // ConsumerConf represents the configuration of a consumer. type ConsumerConf struct { // Topic name that should be consumed Topic string // Partition ID that should be consumed. Partition int32 // RequestTimeout controls fetch request timeout. This operation is // blocking the whole connection, so it should always be set to a small // value. By default it's set to 50ms. // To control fetch function timeout use RetryLimit and RetryWait. RequestTimeout time.Duration // RetryLimit limits fetching messages a given amount of times before // returning ErrNoData error. // // Default is -1, which turns this limit off. RetryLimit int // RetryWait controls the duration of wait between fetch request calls, // when no data was returned. // // Default is 50ms. RetryWait time.Duration // RetryErrLimit limits the number of retry attempts when an error is // encountered. // // Default is 10. RetryErrLimit int // RetryErrWait controls the wait duration between retries after failed // fetch request. // // Default is 500ms. RetryErrWait time.Duration // MinFetchSize is the minimum size of messages to fetch in bytes. // // Default is 1 to fetch any message available. MinFetchSize int32 // MaxFetchSize is the maximum size of data which can be sent by kafka node // to consumer. // // Default is 4MB. MaxFetchSize int32 // Consumer cursor starting point. Set to StartOffsetNewest to receive only // newly created messages or StartOffsetOldest to read everything. Assign // any offset value to manually set cursor -- consuming starts with the // message whose offset is equal to given value (including first message). // // Default is StartOffsetOldest. StartOffset int64 // Logger used by consumer. By default, reuse logger assigned to broker. Logger Logger } // NewConsumerConf returns the default consumer configuration. func NewConsumerConf(topic string, partition int32) ConsumerConf { return ConsumerConf{ Topic: topic, Partition: partition, RequestTimeout: time.Millisecond * 50, RetryLimit: -1, RetryWait: time.Millisecond * 50, RetryErrLimit: 10, RetryErrWait: time.Millisecond * 500, MinFetchSize: 1, MaxFetchSize: 4 * 1024 * 1024, StartOffset: StartOffsetOldest, Logger: nil, } } // Consumer represents a single partition reading buffer. Consumer is also // providing limited failure handling and message filtering. type consumer struct { broker *Broker conf ConsumerConf mu sync.Mutex offset int64 // offset of next NOT consumed message conn *connection msgbuf []*proto.Message } // Consumer creates a new consumer instance, bound to the broker. func (b *Broker) Consumer(conf ConsumerConf) (Consumer, error) { return b.consumer(conf) } // BatchConsumer creates a new BatchConsumer instance, bound to the broker. 
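//
// A short sketch of batch consumption (topic name and partition are
// placeholders):
//
//	conf := kafka.NewConsumerConf("example-topic", 0)
//	conf.StartOffset = kafka.StartOffsetOldest
//	batchConsumer, err := broker.BatchConsumer(conf)
//	if err != nil {
//		// handle error
//	}
//	msgs, err := batchConsumer.ConsumeBatch()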
func (b *Broker) BatchConsumer(conf ConsumerConf) (BatchConsumer, error) { return b.consumer(conf) } func (b *Broker) consumer(conf ConsumerConf) (*consumer, error) { conn, err := b.muLeaderConnection(conf.Topic, conf.Partition) if err != nil { return nil, err } if conf.Logger == nil { conf.Logger = b.conf.Logger } offset := conf.StartOffset if offset < 0 { switch offset { case StartOffsetNewest: off, err := b.OffsetLatest(conf.Topic, conf.Partition) if err != nil { return nil, err } offset = off case StartOffsetOldest: off, err := b.OffsetEarliest(conf.Topic, conf.Partition) if err != nil { return nil, err } offset = off default: return nil, fmt.Errorf("invalid start offset: %d", conf.StartOffset) } } c := &consumer{ broker: b, conn: conn, conf: conf, msgbuf: make([]*proto.Message, 0), offset: offset, } return c, nil } // consume is returning a batch of messages from consumed partition. // Consumer can retry fetching messages even if responses return no new // data. Retry behaviour can be configured through RetryLimit and RetryWait // consumer parameters. // // consume can retry sending request on common errors. This behaviour can // be configured with RetryErrLimit and RetryErrWait consumer configuration // attributes. func (c *consumer) consume() ([]*proto.Message, error) { var msgbuf []*proto.Message var retry int for len(msgbuf) == 0 { var err error msgbuf, err = c.fetch() if err != nil { return nil, err } if len(msgbuf) == 0 { if c.conf.RetryWait > 0 { time.Sleep(c.conf.RetryWait) } retry++ if c.conf.RetryLimit != -1 && retry > c.conf.RetryLimit { return nil, ErrNoData } } } return msgbuf, nil } func (c *consumer) Consume() (*proto.Message, error) { c.mu.Lock() defer c.mu.Unlock() if len(c.msgbuf) == 0 { var err error c.msgbuf, err = c.consume() if err != nil { return nil, err } } msg := c.msgbuf[0] c.msgbuf = c.msgbuf[1:] c.offset = msg.Offset + 1 return msg, nil } func (c *consumer) ConsumeBatch() ([]*proto.Message, error) { c.mu.Lock() defer c.mu.Unlock() batch, err := c.consume() if err != nil { return nil, err } c.offset = batch[len(batch)-1].Offset + 1 return batch, nil } // fetch and return next batch of messages. In case of certain set of errors, // retry sending fetch request. Retry behaviour can be configured with // RetryErrLimit and RetryErrWait consumer configuration attributes. 
func (c *consumer) fetch() ([]*proto.Message, error) { req := proto.FetchReq{ RequestHeader: proto.RequestHeader{ClientID: c.broker.conf.ClientID}, MaxWaitTime: c.conf.RequestTimeout, MinBytes: c.conf.MinFetchSize, MaxBytes: c.conf.MaxFetchSize, Topics: []proto.FetchReqTopic{ { Name: c.conf.Topic, Partitions: []proto.FetchReqPartition{ { ID: c.conf.Partition, FetchOffset: c.offset, MaxBytes: c.conf.MaxFetchSize, }, }, }, }, } var resErr error for retry := 0; retry < c.conf.RetryErrLimit; retry++ { if retry != 0 { time.Sleep(c.conf.RetryErrWait) } if c.conn == nil { conn, err := c.broker.muLeaderConnection(c.conf.Topic, c.conf.Partition) if err != nil { resErr = err continue } c.conn = conn } resp, err := c.conn.Fetch(&req) resErr = err if _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE { c.conf.Logger.Debug("connection died while fetching message", "topic", c.conf.Topic, "partition", c.conf.Partition, "error", err) c.broker.muCloseDeadConnection(c.conn) c.conn = nil continue } if err != nil { c.conf.Logger.Debug("cannot fetch messages: unknown error", "retry", retry, "error", err) c.broker.muCloseDeadConnection(c.conn) c.conn = nil continue } messages, shouldRetry, err := extractMessages(resp, c.conf) if shouldRetry { c.conf.Logger.Debug("cannot fetch messages", "retry", retry, "error", err) if err := c.broker.muRefreshMetadata(); err != nil { c.conf.Logger.Debug("cannot refresh metadata", "error", err) } // The connection is fine, so don't close it, // but we may very well need to talk to a different broker now. // Set the conn to nil so that next time around the loop // we'll check the metadata again to see who we're supposed to talk to. c.conn = nil continue } return messages, err } return nil, resErr } // extractMessages extracts relevant messages from a fetch response. // // The boolean response parameter will be true if a temporary error was // encountered, indicating that the fetch may be retried. func extractMessages(resp *proto.FetchResp, conf ConsumerConf) ([]*proto.Message, bool, error) { for _, topic := range resp.Topics { if topic.Name != conf.Topic { conf.Logger.Warn("unexpected topic information received", "got", topic.Name, "expected", conf.Topic) continue } for _, part := range topic.Partitions { if part.ID != conf.Partition { conf.Logger.Warn("unexpected partition information received", "topic", topic.Name, "expected", conf.Partition, "got", part.ID) continue } switch part.Err { case proto.ErrLeaderNotAvailable, proto.ErrNotLeaderForPartition, proto.ErrBrokerNotAvailable, proto.ErrUnknownTopicOrPartition: return nil, true, part.Err } if part.MessageVersion < 2 { return part.Messages, false, part.Err } // In the kafka > 0.11 MessageSet was replaced // with a new structure called RecordBatch // and Message was replaced with Record // In order to keep API for Consumer // here we repack Records to Messages recordCount := 0 for _, rb := range part.RecordBatches { recordCount += len(rb.Records) } messages := make([]*proto.Message, 0, recordCount) for _, rb := range part.RecordBatches { for _, r := range rb.Records { m := &proto.Message{ Key: r.Key, Value: r.Value, Offset: rb.FirstOffset + r.OffsetDelta, Topic: topic.Name, Partition: part.ID, TipOffset: part.TipOffset, } messages = append(messages, m) } } return messages, false, part.Err } } return nil, false, errors.New("incomplete fetch response") } // OffsetCoordinatorConf represents the configuration of an offset coordinator. 
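//
// A typical flow pairs this configuration with Broker.OffsetCoordinator
// (group, topic and partition below are placeholders):
//
//	coord, err := broker.OffsetCoordinator(kafka.NewOffsetCoordinatorConf("example-group"))
//	if err != nil {
//		// handle error
//	}
//	offset, _, err := coord.Offset("example-topic", 0)
//	err = coord.Commit("example-topic", 0, offset)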
type OffsetCoordinatorConf struct { ConsumerGroup string // RetryErrLimit limits messages fetch retry upon failure. By default 10. RetryErrLimit int // RetryErrWait controls wait duration between retries after failed fetch // request. By default 500ms. RetryErrWait time.Duration // Logger used by consumer. By default, reuse logger assigned to broker. Logger Logger } // NewOffsetCoordinatorConf returns default OffsetCoordinator configuration. func NewOffsetCoordinatorConf(consumerGroup string) OffsetCoordinatorConf { return OffsetCoordinatorConf{ ConsumerGroup: consumerGroup, RetryErrLimit: 10, RetryErrWait: time.Millisecond * 500, Logger: nil, } } type offsetCoordinator struct { conf OffsetCoordinatorConf broker *Broker mu sync.Mutex conn *connection } // OffsetCoordinator returns offset management coordinator for single consumer // group, bound to broker. func (b *Broker) OffsetCoordinator(conf OffsetCoordinatorConf) (OffsetCoordinator, error) { conn, err := b.muCoordinatorConnection(conf.ConsumerGroup) if err != nil { return nil, err } if conf.Logger == nil { conf.Logger = b.conf.Logger } c := &offsetCoordinator{ broker: b, conf: conf, conn: conn, } return c, nil } // Commit is saving offset information for given topic and partition. // // Commit can retry saving offset information on common errors. This behaviour // can be configured with with RetryErrLimit and RetryErrWait coordinator // configuration attributes. func (c *offsetCoordinator) Commit(topic string, partition int32, offset int64) error { return c.commit(topic, partition, offset, "") } // Commit works exactly like Commit method, but store extra metadata string // together with offset information. func (c *offsetCoordinator) CommitFull(topic string, partition int32, offset int64, metadata string) error { return c.commit(topic, partition, offset, metadata) } // commit is saving offset and metadata information. Provides limited error // handling configurable through OffsetCoordinatorConf. 
func (c *offsetCoordinator) commit(topic string, partition int32, offset int64, metadata string) (resErr error) { c.mu.Lock() defer c.mu.Unlock() for retry := 0; retry < c.conf.RetryErrLimit; retry++ { if retry != 0 { c.mu.Unlock() time.Sleep(c.conf.RetryErrWait) c.mu.Lock() } // connection can be set to nil if previously reference connection died if c.conn == nil { conn, err := c.broker.muCoordinatorConnection(c.conf.ConsumerGroup) if err != nil { resErr = err c.conf.Logger.Debug("cannot connect to coordinator", "consumGrp", c.conf.ConsumerGroup, "error", err) continue } c.conn = conn } resp, err := c.conn.OffsetCommit(&proto.OffsetCommitReq{ RequestHeader: proto.RequestHeader{ClientID: c.broker.conf.ClientID}, ConsumerGroup: c.conf.ConsumerGroup, Topics: []proto.OffsetCommitReqTopic{ { Name: topic, Partitions: []proto.OffsetCommitReqPartition{ {ID: partition, Offset: offset, TimeStamp: time.Now(), Metadata: metadata}, }, }, }, }) resErr = err if _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE { c.conf.Logger.Debug("connection died while commiting", "topic", topic, "partition", partition, "consumGrp", c.conf.ConsumerGroup) c.broker.muCloseDeadConnection(c.conn) c.conn = nil } else if err == nil { for _, t := range resp.Topics { if t.Name != topic { c.conf.Logger.Debug("unexpected topic information received", "got", t.Name, "expected", topic) continue } for _, part := range t.Partitions { if part.ID != partition { c.conf.Logger.Debug("unexpected partition information received", "topic", topic, "got", part.ID, "expected", partition) continue } return part.Err } } return errors.New("response does not contain commit information") } } return resErr } // Offset is returning last offset and metadata information committed for given // topic and partition. // Offset can retry sending request on common errors. This behaviour can be // configured with with RetryErrLimit and RetryErrWait coordinator // configuration attributes. 
func (c *offsetCoordinator) Offset(topic string, partition int32) (offset int64, metadata string, resErr error) { c.mu.Lock() defer c.mu.Unlock() for retry := 0; retry < c.conf.RetryErrLimit; retry++ { if retry != 0 { c.mu.Unlock() time.Sleep(c.conf.RetryErrWait) c.mu.Lock() } // connection can be set to nil if previously reference connection died if c.conn == nil { conn, err := c.broker.muCoordinatorConnection(c.conf.ConsumerGroup) if err != nil { c.conf.Logger.Debug("cannot connect to coordinator", "consumGrp", c.conf.ConsumerGroup, "error", err) resErr = err continue } c.conn = conn } resp, err := c.conn.OffsetFetch(&proto.OffsetFetchReq{ ConsumerGroup: c.conf.ConsumerGroup, Topics: []proto.OffsetFetchReqTopic{ { Name: topic, Partitions: []int32{partition}, }, }, }) resErr = err switch err { case io.EOF, syscall.EPIPE: c.conf.Logger.Debug("connection died while fetching offset", "topic", topic, "partition", partition, "consumGrp", c.conf.ConsumerGroup) c.broker.muCloseDeadConnection(c.conn) c.conn = nil case nil: for _, t := range resp.Topics { if t.Name != topic { c.conf.Logger.Debug("unexpected topic information received", "got", t.Name, "expected", topic) continue } for _, part := range t.Partitions { if part.ID != partition { c.conf.Logger.Debug("unexpected partition information received", "topic", topic, "expected", partition, "get", part.ID) continue } if part.Err != nil { return 0, "", part.Err } return part.Offset, part.Metadata, nil } } return 0, "", errors.New("response does not contain offset information") } } return 0, "", resErr } kafka-2.1.1/v2/broker_test.go000066400000000000000000001736511356004474300160140ustar00rootroot00000000000000package kafka import ( "errors" "fmt" "reflect" "strings" "sync" "testing" "time" "github.com/optiopay/kafka/v2/proto" ) // newTestBrokerConf returns BrokerConf with default configuration adjusted for // tests func newTestBrokerConf(clientID string) BrokerConf { conf := NewBrokerConf(clientID) conf.DialTimeout = 400 * time.Millisecond conf.LeaderRetryLimit = 10 conf.LeaderRetryWait = 2 * time.Millisecond return conf } type MetadataTester struct { host string port int topics map[string]bool allowCreate bool numGeneralFetches int numSpecificFetches int } func NewMetadataHandler(srv *Server, allowCreate bool) *MetadataTester { host, port := srv.HostPort() tester := &MetadataTester{ host: host, port: port, allowCreate: allowCreate, topics: make(map[string]bool), } tester.topics["test"] = true return tester } func (m *MetadataTester) NumGeneralFetches() int { return m.numGeneralFetches } func (m *MetadataTester) NumSpecificFetches() int { return m.numSpecificFetches } func (m *MetadataTester) Handler() RequestHandler { return func(request Serializable) Serializable { req := request.(*proto.MetadataReq) if len(req.Topics) == 0 { m.numGeneralFetches++ } else { m.numSpecificFetches++ } resp := &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: m.host, Port: int32(m.port)}, }, Topics: []proto.MetadataRespTopic{}, } wantsTopic := make(map[string]bool) for _, topic := range req.Topics { if m.allowCreate { m.topics[topic] = true } wantsTopic[topic] = true } for topic := range m.topics { // Return either all topics or only topics that they explicitly requested _, explicitTopic := wantsTopic[topic] if len(req.Topics) > 0 && !explicitTopic { continue } resp.Topics = append(resp.Topics, proto.MetadataRespTopic{ Name: topic, Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, 
Replicas: []int32{1}, Isrs: []int32{1}, }, { ID: 1, Leader: 1, Replicas: []int32{1}, Isrs: []int32{1}, }, }, }) } return resp } } func TestDialWithInvalidAddress(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() addresses := []string{"localhost:4291190", "localhost:2141202", srv.Address()} broker, err := Dial(addresses, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } broker.Close() } func TestDialWithNoAddress(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() _, err := Dial(nil, newTestBrokerConf("tester")) if err == nil { t.Fatalf("expected error, but none received") } } // Tests to ensure that our dial function is randomly selecting brokers from the // list of available brokers func TestDialRandomized(t *testing.T) { srv1 := NewServer() srv1.Start() defer srv1.Close() srv2 := NewServer() srv2.Start() defer srv2.Close() srv3 := NewServer() srv3.Start() defer srv3.Close() for i := 0; i < 30; i++ { _, err := Dial([]string{srv1.Address(), srv2.Address(), srv3.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } } if srv1.Processed == 60 { t.Fatal("all traffic went to first broker") } if srv1.Processed+srv2.Processed+srv3.Processed != 60 { t.Fatal("received unexpected number of requests") } if srv1.Processed == 0 || srv2.Processed == 0 || srv3.Processed == 0 { t.Fatal("one broker received no traffic") } } func TestProducer(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) messages := []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, } _, err = producer.Produce("does-not-exist", 42142, messages...) if err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected '%s', got %s", proto.ErrUnknownTopicOrPartition, err) } var handleErr error var createdMsgs int srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) if req.Topics[0].Name != "test" { handleErr = fmt.Errorf("expected 'test' topic, got %s", req.Topics[0].Name) return nil } if req.Topics[0].Partitions[0].ID != 0 { handleErr = fmt.Errorf("expected 0 partition, got %d", req.Topics[0].Partitions[0].ID) return nil } messages := req.Topics[0].Partitions[0].Messages for _, msg := range messages { createdMsgs++ crc := proto.ComputeCrc(msg, proto.CompressionNone) if msg.Crc != crc { handleErr = fmt.Errorf("expected '%d' crc, got %d", crc, msg.Crc) return nil } } return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 5, }, }, }, }, } }) offset, err := producer.Produce("test", 0, messages...) 
if handleErr != nil { t.Fatalf("handling error: %s", handleErr) } if err != nil { t.Fatalf("expected no error, got %s", err) } if offset != 5 { t.Fatalf("expected offset different than %d", offset) } if messages[0].Offset != 5 || messages[1].Offset != 6 { t.Fatalf("message offset is incorrect: %#v", messages) } if createdMsgs != 2 { t.Fatalf("expected 2 messages to be created, got %d", createdMsgs) } broker.Close() } func TestBrokerWithEmptyCertificates(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) brokerConf := newTestBrokerConf("tester") brokerConf.TLSCa = []byte{} brokerConf.TLSCert = []byte{} brokerConf.TLSKey = []byte{} broker, err := Dial([]string{srv.Address()}, brokerConf) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) messages := []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, } _, err = producer.Produce("does-not-exist", 42142, messages...) // error means that we successfully connected if err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected '%s', got %s", proto.ErrUnknownTopicOrPartition, err) } broker.Close() } func TestProducerWithNoAck(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RequiredAcks = proto.RequiredAcksNone prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) messages := []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, } _, err = producer.Produce("does-not-exist", 42142, messages...) if err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected '%s', got %s", proto.ErrUnknownTopicOrPartition, err) } errc := make(chan error) var createdMsgs int srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { defer close(errc) req := request.(*proto.ProduceReq) if req.RequiredAcks != proto.RequiredAcksNone { errc <- fmt.Errorf("expected no ack request, got %v", req.RequiredAcks) return nil } if req.Topics[0].Name != "test" { errc <- fmt.Errorf("expected 'test' topic, got %s", req.Topics[0].Name) return nil } if req.Topics[0].Partitions[0].ID != 0 { errc <- fmt.Errorf("expected 0 partition, got %d", req.Topics[0].Partitions[0].ID) return nil } messages := req.Topics[0].Partitions[0].Messages for _, msg := range messages { createdMsgs++ crc := proto.ComputeCrc(msg, proto.CompressionNone) if msg.Crc != crc { errc <- fmt.Errorf("expected '%d' crc, got %d", crc, msg.Crc) return nil } } return nil }) offset, err := producer.Produce("test", 0, messages...) 
if err := <-errc; err != nil { t.Fatalf("handling error: %s", err) } if err != nil { t.Fatalf("expected no error, got %s", err) } if offset != 0 { t.Fatalf("expected offset different than %d", offset) } if createdMsgs != 2 { t.Fatalf("expected 2 messages to be created, got %d", createdMsgs) } broker.Close() } func TestProduceWhileLeaderChange(t *testing.T) { srv1 := NewServer() srv1.Start() defer srv1.Close() srv2 := NewServer() srv2.Start() defer srv2.Close() host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() brokers := []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, } var metaCalls int metadataHandler := func(srvName string) func(Serializable) Serializable { return func(request Serializable) Serializable { metaCalls++ var leader int32 = 1 // send invalid information to producer several times to make sure // it's producing to the wrong node and retrying several times if metaCalls > 4 { leader = 2 } req := request.(*proto.MetadataReq) resp := &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: brokers, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: leader, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } return resp } } srv1.Handle(proto.MetadataReqKind, metadataHandler("srv1")) srv2.Handle(proto.MetadataReqKind, metadataHandler("srv2")) var prod1Calls int srv1.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { prod1Calls++ req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 1, Err: proto.ErrNotLeaderForPartition, }, }, }, }, } }) var prod2Calls int srv2.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { prod2Calls++ req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 1, Offset: 5, }, }, }, }, } }) broker, err := Dial([]string{srv1.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } defer broker.Close() prod := broker.Producer(NewProducerConf()) if off, err := prod.Produce("test", 1, &proto.Message{Value: []byte("foo")}); err != nil { t.Errorf("cannot produce message: %s", err) } else if off != 5 { t.Errorf("expected to get offset 5, got %d", off) } if prod1Calls != 4 { t.Errorf("expected prod1Calls to be 4, got %d", prod1Calls) } if prod2Calls != 1 { t.Errorf("expected prod2Calls to be 1, got %d", prod2Calls) } } func TestConsumer(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) host, port := srv.HostPort() return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host, Port: int32(port)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 413, Leader: 1, Replicas: []int32{1}, Isrs: []int32{1}, }, }, }, }, } }) fetchCallCount := 0 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) fetchCallCount++ if fetchCallCount < 2 { return &proto.FetchResp{ CorrelationID: 
req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 413, TipOffset: 0, Messages: []*proto.Message{}, }, }, }, }, } } messages := []*proto.Message{ {Offset: 3, Key: []byte("1"), Value: []byte("first")}, {Offset: 4, Key: []byte("2"), Value: []byte("second")}, {Offset: 5, Key: []byte("3"), Value: []byte("three")}, } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 413, TipOffset: 2, Messages: messages, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } if _, err := broker.Consumer(NewConsumerConf("does-not-exists", 413)); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected %s error, got %s", proto.ErrUnknownTopicOrPartition, err) } if _, err := broker.Consumer(NewConsumerConf("test", 1)); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected %s error, got %s", proto.ErrUnknownTopicOrPartition, err) } consConf := NewConsumerConf("test", 413) consConf.RetryWait = time.Millisecond consConf.StartOffset = 0 consConf.RetryLimit = 4 consumer, err := broker.Consumer(consConf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } msg1, err := consumer.Consume() if err != nil { t.Fatalf("expected no errors, got %s", err) } if string(msg1.Value) != "first" || string(msg1.Key) != "1" || msg1.Offset != 3 { t.Fatalf("expected different message than %#v", msg1) } msg2, err := consumer.Consume() if err != nil { t.Fatalf("expected no errors, got %s", err) } if string(msg2.Value) != "second" || string(msg2.Key) != "2" || msg2.Offset != 4 { t.Fatalf("expected different message than %#v", msg2) } broker.Close() } func TestBatchConsumer(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) host, port := srv.HostPort() return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host, Port: int32(port)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 413, Leader: 1, Replicas: []int32{1}, Isrs: []int32{1}, }, }, }, }, } }) fetchCallCount := 0 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) fetchCallCount++ if fetchCallCount < 2 { return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 413, TipOffset: 0, Messages: []*proto.Message{}, }, }, }, }, } } messages := []*proto.Message{ {Offset: 3, Key: []byte("1"), Value: []byte("first")}, {Offset: 4, Key: []byte("2"), Value: []byte("second")}, {Offset: 5, Key: []byte("3"), Value: []byte("three")}, } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 413, TipOffset: 2, Messages: messages, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } if _, err := broker.BatchConsumer(NewConsumerConf("does-not-exists", 413)); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected %s error, got %s", proto.ErrUnknownTopicOrPartition, err) } if _, err := 
broker.BatchConsumer(NewConsumerConf("test", 1)); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected %s error, got %s", proto.ErrUnknownTopicOrPartition, err) } consConf := NewConsumerConf("test", 413) consConf.RetryWait = time.Millisecond consConf.StartOffset = 0 consConf.RetryLimit = 4 consumer, err := broker.BatchConsumer(consConf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } batch, err := consumer.ConsumeBatch() if err != nil { t.Fatalf("expected no errors, got %s", err) } if len(batch) != 3 { t.Fatalf("expected 3 messages, got %d", len(batch)) } if string(batch[0].Value) != "first" || string(batch[0].Key) != "1" || batch[0].Offset != 3 { t.Fatalf("expected different message than %#v", batch[0]) } if string(batch[1].Value) != "second" || string(batch[1].Key) != "2" || batch[1].Offset != 4 { t.Fatalf("expected different message than %#v", batch[1]) } if string(batch[2].Value) != "three" || string(batch[2].Key) != "3" || batch[2].Offset != 5 { t.Fatalf("expected different message than %#v", batch[2]) } broker.Close() } func TestConsumerRetry(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) host, port := srv.HostPort() return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host, Port: int32(port)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1}, Isrs: []int32{1}, }, }, }, }, } }) fetchCallCount := 0 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) fetchCallCount++ return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, TipOffset: 0, Messages: []*proto.Message{}, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { t.Fatalf("cannot create broker: %s", err) } consConf := NewConsumerConf("test", 0) consConf.RetryLimit = 5 consConf.StartOffset = 0 consConf.RetryWait = time.Millisecond consumer, err := broker.Consumer(consConf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } if _, err := consumer.Consume(); err != ErrNoData { t.Fatalf("expected %s error, got %s", ErrNoData, err) } if fetchCallCount != 6 { t.Fatalf("expected 6 fetch calls, got %d", fetchCallCount) } broker.Close() } func TestConsumeInvalidOffset(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) messages := []*proto.Message{ // return message with offset lower than requested {Offset: 3, Key: []byte("1"), Value: []byte("first")}, {Offset: 4, Key: []byte("2"), Value: []byte("second")}, {Offset: 5, Key: []byte("3"), Value: []byte("three")}, } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, TipOffset: 2, Messages: messages, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } consConf := NewConsumerConf("test", 0) consConf.StartOffset = 4 consumer, err := broker.Consumer(consConf) if err != nil { 
t.Fatalf("cannot create consumer: %s", err) } msg, err := consumer.Consume() if err != nil { t.Fatalf("expected no errors, got %s", err) } if string(msg.Value) != "second" || string(msg.Key) != "2" || msg.Offset != 4 { t.Fatalf("expected different message than %#v", msg) } broker.Close() } func TestPartitionOffset(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) var handlerErr error srv.Handle(proto.OffsetReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetReq) if req.ReplicaID != -1 { handlerErr = fmt.Errorf("expected -1 replica id, got %d", req.ReplicaID) } if req.Topics[0].Partitions[0].TimeMs != -2 { handlerErr = fmt.Errorf("expected -2 timems, got %d", req.Topics[0].Partitions[0].TimeMs) } return &proto.OffsetResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 1, Offsets: []int64{123, 0}, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } offset, err := broker.offset("test", 1, -2) if handlerErr != nil { t.Fatalf("handler error: %s", handlerErr) } if err != nil { t.Fatalf("cannot fetch offset: %s", err) } if offset != 123 { t.Fatalf("expected 123 offset, got %d", offset) } } func TestPartitionCount(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } count, err := broker.PartitionCount("test") if err != nil { t.Fatalf("expected no error, got %s", err) } if count != 2 { t.Fatalf("expected 2 partitions, got %d", count) } count, err = broker.PartitionCount("test2") if err == nil { t.Fatalf("expected an error, got none!") } if count != 0 { t.Fatalf("expected 0 partitions, got %d", count) } } func TestPartitionOffsetClosedConnection(t *testing.T) { srv1 := NewServer() srv1.Start() srv2 := NewServer() srv2.Start() host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() var handlerErr error srv1.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv1.Handle(proto.OffsetReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetReq) if req.ReplicaID != -1 { handlerErr = fmt.Errorf("expected -1 replica id, got %d", req.ReplicaID) } if req.Topics[0].Partitions[0].TimeMs != -2 { handlerErr = fmt.Errorf("expected -2 timems, got %d", req.Topics[0].Partitions[0].TimeMs) } return &proto.OffsetResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 1, Offsets: []int64{123, 0}, }, }, }, }, } }) // after closing first server, which started as leader, broker should ask // other nodes about the leader and refresh connections srv2.Handle(proto.MetadataReqKind, func(request 
Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 2, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 2, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv2.Handle(proto.OffsetReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetReq) if req.ReplicaID != -1 { handlerErr = fmt.Errorf("expected -1 replica id, got %d", req.ReplicaID) } if req.Topics[0].Partitions[0].TimeMs != -2 { handlerErr = fmt.Errorf("expected -2 timems, got %d", req.Topics[0].Partitions[0].TimeMs) } return &proto.OffsetResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 1, Offsets: []int64{234, 0}, }, }, }, }, } }) broker, err := Dial([]string{srv1.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } offset, err := broker.offset("test", 1, -2) if handlerErr != nil { t.Fatalf("handler error: %s", handlerErr) } if err != nil { t.Fatalf("cannot fetch offset: %s", err) } if offset != 123 { t.Fatalf("expected 123 offset, got %d", offset) } srv1.Close() offset, err = broker.offset("test", 1, -2) if handlerErr != nil { t.Fatalf("handler error: %s", handlerErr) } if err != nil { t.Fatalf("cannot fetch offset: %s", err) } if offset != 234 { t.Fatalf("expected 234 offset, got %d", offset) } srv2.Close() } func TestLeaderConnectionFailover(t *testing.T) { srv1 := NewServer() srv1.Start() defer srv1.Close() srv2 := NewServer() srv2.Start() defer srv2.Close() addresses := []string{srv1.Address()} host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() srv1.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ { NodeID: 1, Host: host1, Port: int32(port1), }, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1}, Isrs: []int32{1}, }, }, }, }, } }) srv2.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ { NodeID: 2, Host: host2, Port: int32(port2), }, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 2, Replicas: []int32{2}, Isrs: []int32{2}, }, }, }, }, } }) conf := newTestBrokerConf("tester") conf.DialTimeout = time.Millisecond * 20 conf.LeaderRetryWait = time.Millisecond conf.LeaderRetryLimit = 3 broker, err := Dial(addresses, conf) if err != nil { t.Fatalf("cannot create broker: %s", err) } if _, err := broker.muLeaderConnection("does-not-exist", 123456); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("%s expected, got %s", proto.ErrUnknownTopicOrPartition, err) } conn, err := broker.muLeaderConnection("test", 0) if err != nil { t.Fatalf("%s", err) } tp := topicPartition{"test", 0} nodeID, ok := broker.metadata.endpoints[tp] if !ok { t.Fatal("endpoint not found") } if nodeID != 1 { t.Fatalf("wrong nodeID = %d, expected 1", nodeID) } srv1.Close() 
broker.muCloseDeadConnection(conn) if _, err := broker.muLeaderConnection("test", 0); err == nil { t.Fatal("expected network error") } // provide node address that will be available after short period broker.metadata.nodes = map[int32]string{ 2: fmt.Sprintf("%s:%d", host2, port2), } _, err = broker.muLeaderConnection("test", 0) if err != nil { t.Fatalf("%s", err) } nodeID, ok = broker.metadata.endpoints[tp] if !ok { t.Fatal("endpoint not found") } if nodeID != 2 { t.Fatalf("wrong nodeID = %d, expected 2", nodeID) } } func TestProducerFailoverRequestTimeout(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) requestsCount := 0 srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) requestsCount++ return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Err: proto.ErrRequestTimeout, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryLimit = 4 prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) _, err = producer.Produce("test", 0, &proto.Message{Value: []byte("first")}, &proto.Message{Value: []byte("second")}) if err != proto.ErrRequestTimeout { t.Fatalf("expected %s, got %s", proto.ErrRequestTimeout, err) } if requestsCount != prodConf.RetryLimit { t.Fatalf("expected %d requests, got %d", prodConf.RetryLimit, requestsCount) } } func TestProducerFailoverLeaderNotAvailable(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) requestsCount := 0 srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) requestsCount++ if requestsCount > 4 { return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 11, }, }, }, }, } } return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Err: proto.ErrLeaderNotAvailable, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryLimit = 5 prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) _, err = producer.Produce("test", 0, &proto.Message{Value: []byte("first")}, &proto.Message{Value: []byte("second")}) if err != nil { t.Fatalf("expected no error, got %s", err) } if requestsCount != 5 { t.Fatalf("expected 5 requests, got %d", requestsCount) } } func TestProducerNoCreateTopic(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() md := NewMetadataHandler(srv, false) srv.Handle(proto.MetadataReqKind, md.Handler()) produces := 0 srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { produces++ // Must return something? 
req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test2", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 5, }, }, }, }, } }, ) // Broker DO NOT create topic brokerConf := newTestBrokerConf("test") brokerConf.AllowTopicCreation = false broker, err := Dial([]string{srv.Address()}, brokerConf) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryLimit = 5 prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) _, err = producer.Produce("test2", 0, &proto.Message{Value: []byte("first")}, &proto.Message{Value: []byte("second")}) if err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected ErrUnknownTopicOrPartition, got %s", err) } if md.NumSpecificFetches() != 0 { t.Fatalf("expected 0 specific topic metadata requests, got %d", md.NumSpecificFetches()) } if produces != 0 { t.Fatalf("expected 0 produce attempts, got %d", produces) } } func TestProducerTryCreateTopic(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() md := NewMetadataHandler(srv, true) srv.Handle(proto.MetadataReqKind, md.Handler()) produces := 0 srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { produces++ // Must return something? req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test2", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 5, }, }, }, }, } }, ) // Broker DO create topic brokerConf := newTestBrokerConf("test") brokerConf.AllowTopicCreation = true broker, err := Dial([]string{srv.Address()}, brokerConf) if err != nil { t.Fatalf("cannot create broker: %s", err) } prodConf := NewProducerConf() prodConf.RetryLimit = 5 prodConf.RetryWait = time.Millisecond producer := broker.Producer(prodConf) _, err = producer.Produce("test2", 0, &proto.Message{Value: []byte("first")}, &proto.Message{Value: []byte("second")}) if err != nil { t.Fatalf("expected no error, got %s", err) } if md.NumSpecificFetches() != 1 { t.Fatalf("expected 1 specific topic metadata requests, got %d", md.NumSpecificFetches()) } if produces != 1 { t.Fatalf("expected 1 produce attempts, got %d", produces) } } func TestConsumeWhileLeaderChange(t *testing.T) { srv1 := NewServer() srv1.Start() defer srv1.Close() srv2 := NewServer() srv2.Start() defer srv2.Close() host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() brokers := []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, } var metaCalls int metadataHandler := func(srvName string) func(Serializable) Serializable { return func(request Serializable) Serializable { metaCalls++ var leader int32 // send invalid information to producer several times to make sure // client is consuming wrong node and retrying several times before // succeeding if metaCalls < 3 { leader = 1 } else if metaCalls < 6 { leader = 0 } else { leader = 2 } req := request.(*proto.MetadataReq) resp := &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: brokers, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: leader, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } return resp } } srv1.Handle(proto.MetadataReqKind, metadataHandler("srv1")) srv2.Handle(proto.MetadataReqKind, 
metadataHandler("srv2")) var fetch1Calls int srv1.Handle(proto.FetchReqKind, func(request Serializable) Serializable { fetch1Calls++ req := request.(*proto.FetchReq) if fetch1Calls == 1 { return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 1, TipOffset: 4, Messages: []*proto.Message{ {Offset: 1, Value: []byte("first")}, }, }, }, }, }, } } respErr := proto.ErrNotLeaderForPartition if fetch1Calls > 2 { respErr = proto.ErrUnknownTopicOrPartition } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 1, Err: respErr, }, }, }, }, } }) var fetch2Calls int srv2.Handle(proto.FetchReqKind, func(request Serializable) Serializable { fetch2Calls++ req := request.(*proto.FetchReq) return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 1, TipOffset: 8, Messages: []*proto.Message{ {Offset: 2, Value: []byte("second")}, }, }, }, }, }, } }) broker, err := Dial([]string{srv1.Address()}, newTestBrokerConf("tester")) if err != nil { t.Fatalf("cannot create broker: %s", err) } defer broker.Close() conf := NewConsumerConf("test", 1) conf.StartOffset = 0 cons, err := broker.Consumer(conf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } // consume twice - once from srv1 and once from srv2 if m, err := cons.Consume(); err != nil { t.Errorf("cannot consume: %s", err) } else if m.Offset != 1 { t.Errorf("expected offset to be 1, got %+v", m) } if m, err := cons.Consume(); err != nil { t.Errorf("cannot consume: %s", err) } else if m.Offset != 2 { t.Errorf("expected offset to be 2, got %+v", m) } // 1,2,3 -> srv1 // 4, 5 -> no leader // 6, 7... 
-> srv2 if metaCalls != 6 { t.Errorf("expected 6 meta calls, got %d", metaCalls) } if fetch1Calls != 3 { t.Errorf("expected fetch1Calls to be 3, got %d", fetch1Calls) } if fetch2Calls != 1 { t.Errorf("expected fetch2Calls to be 1, got %d", fetch2Calls) } } func TestConsumerFailover(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() messages := []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, } srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) respCount := 0 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { respCount++ req := request.(*proto.FetchReq) if respCount == 4 { return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 1, Err: nil, }, }, }, }, } } resp := &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 1, Messages: messages, }, }, }, }, } messages = []*proto.Message{} return resp }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { t.Fatalf("cannot create broker: %s", err) } conf := NewConsumerConf("test", 1) conf.RetryWait = time.Nanosecond conf.RetryLimit = 4 conf.RetryErrWait = time.Nanosecond conf.StartOffset = 0 consumer, err := broker.Consumer(conf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } for { msg, err := consumer.Consume() if err != nil { t.Fatalf("failed to consume: %s", err) } if string(msg.Value) != "first" { t.Fatalf("expected first message got %#q", msg) } msg, err = consumer.Consume() if err != nil { t.Fatalf("failed to consume: %s", err) } if string(msg.Value) != "second" { t.Fatalf("expected second message got %#q", msg) } if msg, err := consumer.Consume(); err != ErrNoData { t.Fatalf("expected no data, got %#v (%#q)", err, msg) } return } } func TestProducerBrokenPipe(t *testing.T) { srv1 := NewServer() srv1.Start() srv2 := NewServer() srv2.Start() host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() srv1.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv1.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 12345, }, }, }, }, } }) // after closing first server, which started as leader, broker should ask // other nodes about the leader and refresh connections srv2.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 2, Replicas: 
[]int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 2, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv2.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: 12346, }, }, }, }, } }) broker, err := Dial([]string{srv1.Address()}, newTestBrokerConf("test-epipe")) if err != nil { t.Fatalf("cannot create broker: %s", err) } data := []byte(strings.Repeat(` http://stackoverflow.com/questions/11200510/how-to-simulate-abnormal-case-for-socket-tcp-programming-in-linux-such-as-termi How to get the error EPIPE? To get the error EPIPE, you need to send large amount of data after closing the socket on the peer side. You can get more info about EPIPE error from this SO Link. I had asked a question about Broken Pipe Error in the link provided and the accepted answer gives a detailed explanation. It is important to note that to get EPIPE error you should have set the flags parameter of send to MSG_NOSIGNAL. Without that, an abnormal send can generate SIGPIPE signal. `, 1000)) pconf := NewProducerConf() pconf.RetryWait = time.Millisecond pconf.RequestTimeout = time.Millisecond * 20 producer := broker.Producer(pconf) // produce whatever to fill the cache if _, err = producer.Produce("test", 0, &proto.Message{Value: data}); err != nil { t.Fatalf("cannot produce: %s", err) } srv1.Close() if _, err = producer.Produce("test", 0, &proto.Message{Value: data}); err != nil { t.Fatalf("cannot produce: %s", err) } } func TestFetchOffset(t *testing.T) { const offset = 94 srv := NewServer() srv.Start() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) off := req.Topics[0].Partitions[0].FetchOffset if off != offset { panic(fmt.Sprintf("expected fetch offset to be 3, got %d", off)) } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, Messages: []*proto.Message{ {Offset: offset}, }, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test-fetch-offset")) if err != nil { t.Fatalf("cannot create broker: %s", err) } conf := NewConsumerConf("test", 0) conf.StartOffset = offset consumer, err := broker.Consumer(conf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } msg, err := consumer.Consume() if err != nil { t.Fatalf("cannot consume message: %s", err) } if msg.Offset != offset { t.Fatalf("expected %d offset, got %d", offset, msg.Offset) } } func TestLatestOffset(t *testing.T) { const offset = 94 srv := NewServer() srv.Start() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) srv.Handle(proto.OffsetReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetReq) return &proto.OffsetResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 0, Offsets: []int64{offset}, }, }, }, }, } }) conf := newTestBrokerConf("test-latest-offset") conf.RetryErrWait = time.Millisecond broker, err := Dial([]string{srv.Address()}, conf) if err != nil { t.Fatalf("cannot create broker: %s", err) return } var wg sync.WaitGroup ch := make(chan struct{}) ech := make(chan error, 2) getLatest := func() 
{ _ = <-ch defer wg.Done() for i := 0; i < 100; i++ { _, err := broker.OffsetLatest("test", 0) if err != nil { ech <- errors.New("Failed to fetch the latest offset") } } } wg.Add(1) go getLatest() wg.Add(1) go getLatest() close(ch) wg.Wait() select { case e := <-ech: t.Fatal(e) default: return } } func TestConsumerBrokenPipe(t *testing.T) { srv1 := NewServer() srv1.Start() srv2 := NewServer() srv2.Start() host1, port1 := srv1.HostPort() host2, port2 := srv2.HostPort() longBytes := []byte(strings.Repeat(`xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`, 1000)) srv1.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 1, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv1.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, Messages: []*proto.Message{ {Offset: 0, Value: longBytes}, }, }, }, }, }, } }) // after closing first server, which started as leader, broker should ask // other nodes about the leader and refresh connections srv2.Handle(proto.MetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.MetadataReq) return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host1, Port: int32(port1)}, {NodeID: 2, Host: host2, Port: int32(port2)}, }, Topics: []proto.MetadataRespTopic{ { Name: "test", Partitions: []proto.MetadataRespPartition{ { ID: 0, Leader: 2, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, { ID: 1, Leader: 2, Replicas: []int32{1, 2}, Isrs: []int32{1, 2}, }, }, }, }, } }) srv2.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, Messages: []*proto.Message{ {Offset: 1, Value: longBytes}, }, }, }, }, }, } }) bconf := newTestBrokerConf("test-epipe") broker, err := Dial([]string{srv1.Address()}, bconf) if err != nil { t.Fatalf("cannot create broker: %s", err) } conf := NewConsumerConf("test", 0) conf.RetryErrWait = time.Millisecond conf.RetryWait = time.Millisecond conf.StartOffset = 0 consumer, err := broker.Consumer(conf) if err != nil { t.Fatalf("cannot create consumer: %s", err) } if _, err = consumer.Consume(); err != nil { t.Fatalf("cannot consume: %s", err) } srv1.Close() // this should succeed after reconnecting to second node if _, err = consumer.Consume(); err != nil { t.Fatalf("cannot consume: %s", err) } } func TestOffsetCoordinator(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() setOffset := int64(-1) srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) srv.Handle(proto.ConsumerMetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.ConsumerMetadataReq) host, port := srv.HostPort() return &proto.ConsumerMetadataResp{ CorrelationID: req.GetCorrelationID(), Err: nil, 
CoordinatorID: 1, CoordinatorHost: host, CoordinatorPort: int32(port), } }) srv.Handle(proto.OffsetCommitReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetCommitReq) setOffset = req.Topics[0].Partitions[0].Offset return &proto.OffsetCommitResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetCommitRespTopic{ { Name: "first-topic", Partitions: []proto.OffsetCommitRespPartition{ { ID: 0, Err: nil, }, }, }, }, } }) srv.Handle(proto.OffsetFetchReqKind, func(request Serializable) Serializable { req := request.(*proto.OffsetFetchReq) var partition proto.OffsetFetchRespPartition if setOffset == -1 { partition.Err = proto.ErrUnknownTopicOrPartition } else { partition = proto.OffsetFetchRespPartition{ ID: 0, Offset: int64(setOffset), Err: nil, Metadata: "random data", } } return &proto.OffsetFetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.OffsetFetchRespTopic{ { Name: "first-topic", Partitions: []proto.OffsetFetchRespPartition{partition}, }, }, } }) conf := newTestBrokerConf("tester") broker, err := Dial([]string{srv.Address()}, conf) if err != nil { t.Fatalf("cannot create broker: %s", err) } coordConf := NewOffsetCoordinatorConf("test-group") coordinator, err := broker.OffsetCoordinator(coordConf) if err != nil { t.Fatalf("cannot create coordinator: %s", err) } if off, meta, err := coordinator.Offset("does-not-exists", 1423); err == nil { t.Fatalf("expected error, got %d, %q", off, meta) } if _, _, err := coordinator.Offset("first-topic", 0); err != proto.ErrUnknownTopicOrPartition { t.Fatalf("expected %q error, got %s", proto.ErrUnknownTopicOrPartition, err) } if err := coordinator.Commit("first-topic", 0, 421); err != nil { t.Fatalf("expected no error, got %s", err) } off, meta, err := coordinator.Offset("first-topic", 0) if err != nil { t.Fatalf("expected no error, got %s", err) } if off != 421 || meta != "random data" { t.Fatalf("unexpected data %d and %q", off, meta) } } func TestOffsetCoordinatorNoCoordinatorError(t *testing.T) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) srv.Handle(proto.ConsumerMetadataReqKind, func(request Serializable) Serializable { req := request.(*proto.ConsumerMetadataReq) return &proto.ConsumerMetadataResp{ CorrelationID: req.GetCorrelationID(), Err: proto.ErrNoCoordinator, CoordinatorID: 0, CoordinatorHost: "", CoordinatorPort: 0, } }) conf := newTestBrokerConf("tester") broker, err := Dial([]string{srv.Address()}, conf) if err != nil { t.Fatalf("cannot create broker: %s", err) } coordConf := NewOffsetCoordinatorConf("test-group") if _, err := broker.OffsetCoordinator(coordConf); err != proto.ErrNoCoordinator { t.Fatalf("expected %q error, got %v", proto.ErrNoCoordinator, err) } } func TestExtractMessages(t *testing.T) { type testCase struct { resp proto.FetchResp topic string partition int32 expMsgs []*proto.Message expRetry bool expError bool } for name, test := range map[string]testCase{ "retry": { resp: proto.FetchResp{ Topics: []proto.FetchRespTopic{{ Name: "topic1", Partitions: []proto.FetchRespPartition{{ ID: 0, Err: proto.ErrLeaderNotAvailable, }}, }}, }, topic: "topic1", partition: 0, expMsgs: nil, expRetry: true, expError: true, }, "v1": { resp: proto.FetchResp{ Topics: []proto.FetchRespTopic{{ Name: "topic1", Partitions: []proto.FetchRespPartition{{ ID: 0, MessageVersion: 1, Messages: []*proto.Message{{ Key: []byte("key"), Value: []byte("value"), }}, }}, }}, }, topic: "topic1", partition: 0, expMsgs: 
[]*proto.Message{{ Key: []byte("key"), Value: []byte("value"), }}, expRetry: false, expError: false, }, "v2": { resp: proto.FetchResp{ Topics: []proto.FetchRespTopic{{ Name: "topic1", Partitions: []proto.FetchRespPartition{{ ID: 0, MessageVersion: 2, TipOffset: 532, RecordBatches: []*proto.RecordBatch{{ FirstOffset: 3, Records: []*proto.Record{{ OffsetDelta: 0, Key: []byte("key3"), Value: []byte("value3"), }, { OffsetDelta: 1, Key: []byte("key4"), Value: []byte("value4"), }}, }, { FirstOffset: 5, Records: []*proto.Record{{ OffsetDelta: 0, Key: []byte("key5"), Value: []byte("value5"), }}, }}, }}, }}, }, topic: "topic1", partition: 0, expMsgs: []*proto.Message{{ Key: []byte("key3"), Value: []byte("value3"), Offset: 3, Topic: "topic1", Partition: 0, TipOffset: 532, }, { Key: []byte("key4"), Value: []byte("value4"), Offset: 4, Topic: "topic1", Partition: 0, TipOffset: 532, }, { Key: []byte("key5"), Value: []byte("value5"), Offset: 5, Topic: "topic1", Partition: 0, TipOffset: 532, }}, expRetry: false, expError: false, }, "no data": { resp: proto.FetchResp{ Topics: []proto.FetchRespTopic{{ Name: "topicX", Partitions: []proto.FetchRespPartition{{ ID: 0, }}, }, { Name: "topicY", }, { Name: "topic1", Partitions: []proto.FetchRespPartition{{ ID: 1, }}, }}, }, topic: "topic1", partition: 0, expMsgs: nil, expRetry: false, expError: true, }, } { t.Run(name, func(t *testing.T) { conf := ConsumerConf{ Topic: test.topic, Partition: test.partition, Logger: &nullLogger{}, } gotMsgs, gotRetry, gotErr := extractMessages(&test.resp, conf) if gotRetry != test.expRetry { t.Fatalf("got retry: %t but expected: %t", gotRetry, test.expRetry) } if (gotErr != nil) != test.expError { t.Fatalf("got error: [%s] but expected error: %t", gotErr, test.expError) } if !reflect.DeepEqual(gotMsgs, test.expMsgs) { t.Fatalf("got msgs %#v but expected %#v", gotMsgs, test.expMsgs) } }) } } func BenchmarkConsumer_10Msgs(b *testing.B) { benchmarkConsumer(b, 10) } func BenchmarkConsumer_100Msgs(b *testing.B) { benchmarkConsumer(b, 100) } func BenchmarkConsumer_500Msgs(b *testing.B) { benchmarkConsumer(b, 500) } func BenchmarkConsumer_1000Msgs(b *testing.B) { benchmarkConsumer(b, 1000) } // this is not the best benchmark, because Server implementation is // not made for performance, but it should be good enough to help tuning code. func benchmarkConsumer(b *testing.B, messagesPerResp int) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) var msgOffset int64 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) messages := make([]*proto.Message, messagesPerResp) for i := range messages { msgOffset++ msg := &proto.Message{ Offset: msgOffset, Value: []byte(`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. 
Nam tincidunt congue enim, ut porta lorem lacinia consectetur.`), } messages[i] = msg } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, TipOffset: msgOffset - int64(len(messages)), Messages: messages, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { b.Fatalf("cannot create broker: %s", err) } conf := NewConsumerConf("test", 0) conf.StartOffset = 0 consumer, err := broker.Consumer(conf) if err != nil { b.Fatalf("cannot create consumer: %s", err) } b.ResetTimer() for i := 0; i < b.N; i++ { _, err := consumer.Consume() if err != nil { b.Fatalf("cannot fetch message: %s", err) } } } func BenchmarkConsumerConcurrent_8Consumers(b *testing.B) { benchmarkConsumerConcurrent(b, 8) } func BenchmarkConsumerConcurrent_32Consumers(b *testing.B) { benchmarkConsumerConcurrent(b, 32) } func BenchmarkConsumerConcurrent_64Consumers(b *testing.B) { benchmarkConsumerConcurrent(b, 64) } // this is not the best benchmark, because Server implementation is // not made for performance, but it should be good enough to help tuning code. func benchmarkConsumerConcurrent(b *testing.B, concurrentConsumers int) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) var msgOffset int64 srv.Handle(proto.FetchReqKind, func(request Serializable) Serializable { req := request.(*proto.FetchReq) messages := make([]*proto.Message, concurrentConsumers*2000) for i := range messages { msgOffset++ msg := &proto.Message{ Offset: msgOffset, Value: []byte(`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. 
Nam tincidunt congue enim, ut porta lorem lacinia consectetur.`), } messages[i] = msg } return &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.FetchRespTopic{ { Name: "test", Partitions: []proto.FetchRespPartition{ { ID: 0, TipOffset: msgOffset - int64(len(messages)), Messages: messages, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("test")) if err != nil { b.Fatalf("cannot create broker: %s", err) } conf := NewConsumerConf("test", 0) conf.StartOffset = 0 consumer, err := broker.Consumer(conf) if err != nil { b.Fatalf("cannot create consumer: %s", err) } start := make(chan struct{}) var wg sync.WaitGroup wg.Add(concurrentConsumers) for i := 0; i < concurrentConsumers; i++ { go func(c Consumer) { defer wg.Done() for i := 0; i < b.N/concurrentConsumers; i++ { _, err := c.Consume() if err != nil { b.Fatalf("cannot fetch message: %s", err) } } }(consumer) } b.ResetTimer() close(start) wg.Wait() } func BenchmarkProducer_1Msgs(b *testing.B) { benchmarkProducer(b, 1) } func BenchmarkProducer_2Msgs(b *testing.B) { benchmarkProducer(b, 2) } func BenchmarkProducer_10Msgs(b *testing.B) { benchmarkProducer(b, 10) } func BenchmarkProducer_50Msgs(b *testing.B) { benchmarkProducer(b, 50) } func BenchmarkProducer_200Msgs(b *testing.B) { benchmarkProducer(b, 200) } func BenchmarkProducer_1000Msgs(b *testing.B) { benchmarkProducer(b, 1000) } func benchmarkProducer(b *testing.B, messagesPerReq int64) { srv := NewServer() srv.Start() defer srv.Close() srv.Handle(proto.MetadataReqKind, NewMetadataHandler(srv, false).Handler()) var msgOffset int64 srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable { req := request.(*proto.ProduceReq) msgOffset += messagesPerReq return &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), Topics: []proto.ProduceRespTopic{ { Name: "test", Partitions: []proto.ProduceRespPartition{ { ID: 0, Offset: msgOffset, }, }, }, }, } }) broker, err := Dial([]string{srv.Address()}, newTestBrokerConf("tester")) if err != nil { b.Fatalf("cannot create broker: %s", err) } messages := make([]*proto.Message, messagesPerReq) for i := range messages { msg := &proto.Message{ Value: []byte(`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur.`), } messages[i] = msg } producer := broker.Producer(NewProducerConf()) b.ResetTimer() for i := 0; i < b.N; i++ { if _, err := producer.Produce("test", 0, messages...); err != nil { b.Fatalf("cannot produce message: %s", err) } } } kafka-2.1.1/v2/connection.go000066400000000000000000000274561356004474300156310ustar00rootroot00000000000000package kafka import ( "bufio" "bytes" "crypto/tls" "crypto/x509" "errors" "fmt" "math" "net" "sync" "time" "github.com/optiopay/kafka/v2/proto" ) // ErrClosed is returned as result of any request made using closed connection. var ErrClosed = errors.New("closed") // Low level abstraction over connection to Kafka. 
type connection struct { rw net.Conn stop chan struct{} nextID chan int32 logger Logger mu sync.Mutex respc map[int32]chan []byte stopErr error readTimeout time.Duration apiVersions map[int16]proto.SupportedVersion } func newTLSConnection(address string, ca, cert, key []byte, timeout, readTimeout time.Duration) (*connection, error) { var fetchVersions = true for { roots := x509.NewCertPool() ok := roots.AppendCertsFromPEM(ca) if !ok { return nil, fmt.Errorf("Cannot parse root certificate") } certificate, err := tls.X509KeyPair(cert, key) if err != nil { return nil, fmt.Errorf("Failed to parse key/cert for TLS: %s", err) } conf := &tls.Config{ Certificates: []tls.Certificate{certificate}, RootCAs: roots, } dialer := net.Dialer{ Timeout: timeout, KeepAlive: 30 * time.Second, } conn, err := tls.DialWithDialer(&dialer, "tcp", address, conf) if err != nil { return nil, err } c := &connection{ stop: make(chan struct{}), nextID: make(chan int32), rw: conn, respc: make(map[int32]chan []byte), logger: &nullLogger{}, readTimeout: readTimeout, apiVersions: make(map[int16]proto.SupportedVersion), } go c.nextIDLoop() go c.readRespLoop() if fetchVersions { if c.cacheApiVersions() != nil { fetchVersions = false //required for errorchk _ = c.Close() } } return c, nil } } // newConnection returns new, initialized connection or error func newTCPConnection(address string, timeout, readTimeout time.Duration) (*connection, error) { var fetchVersions = true for { dialer := net.Dialer{ Timeout: timeout, KeepAlive: 30 * time.Second, } conn, err := dialer.Dial("tcp", address) if err != nil { return nil, err } c := &connection{ stop: make(chan struct{}), nextID: make(chan int32), rw: conn, respc: make(map[int32]chan []byte), logger: &nullLogger{}, readTimeout: readTimeout, apiVersions: make(map[int16]proto.SupportedVersion), } go c.nextIDLoop() go c.readRespLoop() if fetchVersions { if c.cacheApiVersions() != nil { fetchVersions = false //required for errorchk _ = c.Close() continue } } return c, nil } } func (c *connection) cacheApiVersions() error { apiVersions, err := c.APIVersions(&proto.APIVersionsReq{}) if err != nil { c.logger.Debug("cannot fetch apiversions", "error", err) return err } for _, api := range apiVersions.APIVersions { c.apiVersions[api.APIKey] = api } return nil } //getBestVersion returns version for passed apiKey which best fit server and client requirements func (c *connection) getBestVersion(apiKey int16) int16 { if requested, ok := c.apiVersions[apiKey]; ok { supported := proto.SupportedByDriver[apiKey] if min(supported.MaxVersion, requested.MaxVersion) >= max(supported.MinVersion, requested.MinVersion) { return min(supported.MaxVersion, requested.MaxVersion) } } return 0 } func min(a int16, b int16) int16 { if a < b { return a } return b } func max(a int16, b int16) int16 { if a > b { return a } return b } // nextIDLoop generates correlation IDs, making sure they are always in order // and within the scope of request-response mapping array. func (c *connection) nextIDLoop() { var id int32 = 1 for { select { case <-c.stop: close(c.nextID) return case c.nextID <- id: id++ if id == math.MaxInt32 { id = 1 } } } } // readRespLoop constantly reading response messages from the socket and after // partial parsing, sends byte representation of the whole message to request // sending process. 
func (c *connection) readRespLoop() { defer func() { c.mu.Lock() for _, cc := range c.respc { close(cc) } c.respc = make(map[int32]chan []byte) c.mu.Unlock() }() rd := bufio.NewReader(c.rw) for { if c.readTimeout > 0 { err := c.rw.SetReadDeadline(time.Now().Add(c.readTimeout)) if err != nil { c.logger.Error("msg", "SetReadDeadline failed", "error", err) } } correlationID, b, err := proto.ReadResp(rd) if err != nil { c.mu.Lock() if c.stopErr == nil { c.stopErr = err close(c.stop) } c.mu.Unlock() return } c.mu.Lock() rc, ok := c.respc[correlationID] delete(c.respc, correlationID) c.mu.Unlock() if !ok { c.logger.Warn( "msg", "response to unknown request", "correlationID", correlationID) continue } select { case <-c.stop: c.mu.Lock() if c.stopErr == nil { c.stopErr = ErrClosed } c.mu.Unlock() case rc <- b: } close(rc) } } // respWaiter register listener to response message with given correlationID // and return channel that single response message will be pushed to once it // will arrive. // After pushing response message, channel is closed. // // Upon connection close, all unconsumed channels are closed. func (c *connection) respWaiter(correlationID int32) (respc chan []byte, err error) { c.mu.Lock() defer c.mu.Unlock() if c.stopErr != nil { return nil, c.stopErr } if _, ok := c.respc[correlationID]; ok { c.logger.Error("msg", "correlation conflict", "correlationID", correlationID) return nil, fmt.Errorf("correlation conflict: %d", correlationID) } respc = make(chan []byte) c.respc[correlationID] = respc return respc, nil } // releaseWaiter removes response channel from waiters pool and close it. // Calling this method for unknown correlationID has no effect. func (c *connection) releaseWaiter(correlationID int32) { c.mu.Lock() rc, ok := c.respc[correlationID] if ok { delete(c.respc, correlationID) close(rc) } c.mu.Unlock() } // Close close underlying transport connection and cancel all pending response // waiters. func (c *connection) Close() error { c.mu.Lock() if c.stopErr == nil { c.stopErr = ErrClosed close(c.stop) } c.mu.Unlock() return c.rw.Close() } func (c *connection) sendRequest(req proto.Request) ([]byte, error) { proto.SetVersion(req.GetHeader(), c.getBestVersion(req.Kind())) var ok bool var correlationID int32 if correlationID, ok = <-c.nextID; !ok { return nil, c.stopErr } proto.SetCorrelationID(req.GetHeader(), correlationID) respc, err := c.respWaiter(req.GetCorrelationID()) if err != nil { c.logger.Error("msg", "failed waiting for response", "error", err) return nil, fmt.Errorf("wait for response: %s", err) } if _, err := req.WriteTo(c.rw); err != nil { c.logger.Error("msg", "cannot write", "error", err) c.releaseWaiter(req.GetCorrelationID()) return nil, err } b, ok := <-respc if !ok { return nil, c.stopErr } return b, nil } func (c *connection) sendRequestWithoutAcks(req proto.Request) error { var ok bool var correlationID int32 if correlationID, ok = <-c.nextID; !ok { return c.stopErr } proto.SetCorrelationID(req.GetHeader(), correlationID) proto.SetVersion(req.GetHeader(), c.getBestVersion(req.Kind())) _, err := req.WriteTo(c.rw) return err } // APIVersions sends a request to fetch the supported versions for each API. 
// Versioning is only supported in Kafka versions above 0.10.0.0 func (c *connection) APIVersions(req *proto.APIVersionsReq) (*proto.APIVersionsResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedAPIVersionsResp(bytes.NewReader(b), req.GetVersion()) } // Metadata sends given metadata request to kafka node and returns related // metadata response. // Calling this method on closed connection will always return ErrClosed. func (c *connection) Metadata(req *proto.MetadataReq) (*proto.MetadataResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedMetadataResp(bytes.NewReader(b), req.GetVersion()) } // CreateTopic sends given createTopic request to kafka node and returns related // response. // Calling this method on closed connection will always return ErrClosed. func (c *connection) CreateTopic(req *proto.CreateTopicsReq) (*proto.CreateTopicsResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadCreateTopicsResp(bytes.NewReader(b)) } // Produce sends given produce request to kafka node and returns related // response. Sending request with no ACKs flag will result with returning nil // right after sending request, without waiting for response. // Calling this method on closed connection will always return ErrClosed. func (c *connection) Produce(req *proto.ProduceReq) (*proto.ProduceResp, error) { if req.RequiredAcks == proto.RequiredAcksNone { return nil, c.sendRequestWithoutAcks(req) } b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedProduceResp(bytes.NewReader(b), req.GetVersion()) } // Fetch sends given fetch request to kafka node and returns related response. // Calling this method on closed connection will always return ErrClosed. func (c *connection) Fetch(req *proto.FetchReq) (*proto.FetchResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } resp, err := proto.ReadVersionedFetchResp(bytes.NewReader(b), req.GetVersion()) if err != nil { return nil, err } trimLeadingMessages(req, resp) return resp, nil } // trimLeadingMessages removes any messages from the response that are before // the requested offset. // // This may be sent by the server because compressed messages are returned in // full batches for efficiency (the broker doesn't need to decompress). This // means that it's possible to get some leading messages with a smaller offset // than requested. func trimLeadingMessages(req *proto.FetchReq, resp *proto.FetchResp) { for ti := range resp.Topics { topic := &resp.Topics[ti] reqTopic := &req.Topics[ti] for pi := range topic.Partitions { partition := &topic.Partitions[pi] requestedOffset := reqTopic.Partitions[pi].FetchOffset if partition.MessageVersion < 2 { i := 0 for _, msg := range partition.Messages { if msg.Offset >= requestedOffset { break } i++ } partition.Messages = partition.Messages[i:] } else { for _, rb := range partition.RecordBatches { i := 0 firstOffset := rb.FirstOffset for _, rec := range rb.Records { if firstOffset+rec.OffsetDelta >= requestedOffset { break } i++ } rb.Records = rb.Records[i:] } } } } } // Offset sends given offset request to kafka node and returns related response. // Calling this method on closed connection will always return ErrClosed. 
func (c *connection) Offset(req *proto.OffsetReq) (*proto.OffsetResp, error) { // TODO(husio) documentation is not mentioning this directly, but I assume // -1 is for non node clients req.ReplicaID = -1 b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedOffsetResp(bytes.NewReader(b), req.GetVersion()) } func (c *connection) ConsumerMetadata(req *proto.ConsumerMetadataReq) (*proto.ConsumerMetadataResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedConsumerMetadataResp(bytes.NewReader(b), req.GetVersion()) } func (c *connection) OffsetCommit(req *proto.OffsetCommitReq) (*proto.OffsetCommitResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedOffsetCommitResp(bytes.NewReader(b), req.GetVersion()) } func (c *connection) OffsetFetch(req *proto.OffsetFetchReq) (*proto.OffsetFetchResp, error) { b, err := c.sendRequest(req) if err != nil { return nil, err } return proto.ReadVersionedOffsetFetchResp(bytes.NewReader(b), req.GetVersion()) } kafka-2.1.1/v2/connection_test.go000066400000000000000000000523371356004474300166640ustar00rootroot00000000000000package kafka import ( "bytes" "crypto/tls" "crypto/x509" "fmt" "io" "io/ioutil" "log" "net" "reflect" "strings" "testing" "time" "github.com/optiopay/kafka/v2/proto" ) const TLSCaFile = "../testkeys/ca.crt" const TLSCertFile = "../testkeys/oats.crt" const TLSKeyFile = "../testkeys/oats.key" type serializableMessage interface { Bytes() ([]byte, error) } type TLSConf struct { ca []byte cert []byte key []byte } func getTLSConf() (*TLSConf, error) { ca, err := ioutil.ReadFile(TLSCaFile) if err != nil { return nil, fmt.Errorf("Cannot read %s", TLSCaFile) } cert, err := ioutil.ReadFile(TLSCertFile) if err != nil { return nil, fmt.Errorf("Cannot read %s", TLSCertFile) } key, err := ioutil.ReadFile(TLSKeyFile) if err != nil { return nil, fmt.Errorf("Cannot read %s", TLSKeyFile) } return &TLSConf{ca: ca, cert: cert, key: key}, nil } //just read request before start to response func readRequest(r io.Reader) error { dec := proto.NewDecoder(r) size := dec.DecodeInt32() var read int32 = 0 buf := make([]byte, size) for read < size { n, err := r.Read(buf) if err != nil { return err } read += int32(n) } return nil } func testTLSServer(messages ...serializableMessage) (net.Listener, error) { tlsConf, err := getTLSConf() if err != nil { return nil, err } roots := x509.NewCertPool() ok := roots.AppendCertsFromPEM(tlsConf.ca) if !ok { return nil, fmt.Errorf("Cannot parse root certificate") } certificate, err := tls.X509KeyPair(tlsConf.cert, tlsConf.key) if err != nil { return nil, fmt.Errorf("Failed to parse key/cert for TLS: %s", err) } conf := &tls.Config{ Certificates: []tls.Certificate{certificate}, RootCAs: roots, } _ = conf ln, err := tls.Listen("tcp4", "localhost:22222", conf) if err != nil { return nil, err } responses := make([][]byte, len(messages)) for i, m := range messages { b, err := m.Bytes() if err != nil { _ = ln.Close() return nil, err } responses[i] = b } go func() { for { cli, err := ln.Accept() if err != nil { return } go func(conn net.Conn) { time.Sleep(time.Millisecond * 50) for _, resp := range responses { err := readRequest(conn) if err != nil { log.Panic(err) } _, _ = cli.Write(resp) } err = cli.Close() }(cli) } }() return ln, nil } func testServer(messages ...serializableMessage) (net.Listener, error) { ln, err := net.Listen("tcp4", "") if err != nil { return nil, err } responses := make([][]byte, 
len(messages)) for i, m := range messages { b, err := m.Bytes() if err != nil { _ = ln.Close() return nil, err } responses[i] = b } go func() { for { cli, err := ln.Accept() if err != nil { return } go func(conn net.Conn) { time.Sleep(time.Millisecond * 50) for _, resp := range responses { _, _ = cli.Write(resp) } _ = cli.Close() }(cli) } }() return ln, nil } func testServer2() (net.Listener, chan serializableMessage, error) { ln, err := net.Listen("tcp4", "") if err != nil { return nil, nil, err } msgs := make(chan serializableMessage, 10) go func() { for { cli, err := ln.Accept() if err != nil { return } go func(conn net.Conn) { defer func() { _ = cli.Close() }() for msg := range msgs { err := readRequest(conn) if err != nil { log.Panic(err) } b, err := msg.Bytes() if err != nil { panic(err) } if _, err = cli.Write(b); err != nil { return } } }(cli) } }() return ln, msgs, nil } func testServer3() (net.Listener, error) { ln, err := net.Listen("tcp4", "") if err != nil { return nil, err } go func() { for { cli, err := ln.Accept() if err != nil { return } go func(conn net.Conn) { _, _ = cli.Read(make([]byte, 1024)) _ = cli.Close() }(cli) } }() return ln, nil } func testSilentServer() (net.Listener, error) { ln, err := net.Listen("tcp4", "") if err != nil { return nil, err } go func() { for { cli, err := ln.Accept() if err != nil { return } go func(conn net.Conn) { _, _ = cli.Read(make([]byte, 1024)) }(cli) } }() return ln, nil } func TestConnectionMetadata(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } resp1 := &proto.MetadataResp{ CorrelationID: 2, Brokers: []proto.MetadataRespBroker{ { NodeID: 666, Host: "example.com", Port: 999, }, }, Topics: []proto.MetadataRespTopic{ { Name: "foo", Partitions: []proto.MetadataRespPartition{ { ID: 7, Leader: 7, Replicas: []int32{7}, Isrs: []int32{7}, }, }, }, }, } ln, ch, err := testServer2() if err != nil { t.Fatalf("test server error: %s", err) } ch <- versionResp conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } ch <- resp1 resp, err := conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Topics: []string{"first", "second"}, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if !reflect.DeepEqual(resp, resp1) { t.Fatalf("expected different response %#v", resp) } if err := conn.Close(); err != nil { t.Fatalf("could not close kafka connection: %s", err) } if err := ln.Close(); err != nil { t.Fatalf("could not close test server: %s", err) } } func TestConnectionProduce(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } resp1 := &proto.ProduceResp{ CorrelationID: 2, Topics: []proto.ProduceRespTopic{ { Name: "first", Partitions: []proto.ProduceRespPartition{ { ID: 0, Err: nil, Offset: 4, }, }, }, }, } resp2 := &proto.ProduceResp{ CorrelationID: 3, Topics: []proto.ProduceRespTopic{ { Name: "first", Partitions: []proto.ProduceRespPartition{ { ID: 0, Err: proto.ErrLeaderNotAvailable, Offset: -1, }, }, }, }, } ln, msgs, err := testServer2() if err != nil { t.Fatalf("test server error: %s", err) } go func() { time.Sleep(time.Millisecond * 10) msgs <- versionResp }() conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } go func() { time.Sleep(time.Millisecond * 10) msgs <- resp1 time.Sleep(time.Millisecond * 10) msgs <- resp2 }() resp, err := 
conn.Produce(&proto.ProduceReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Compression: proto.CompressionNone, RequiredAcks: proto.RequiredAcksAll, Timeout: time.Second, Topics: []proto.ProduceReqTopic{ { Name: "first", Partitions: []proto.ProduceReqPartition{ { ID: 0, Messages: []*proto.Message{ {Key: []byte("key 1"), Value: []byte("value 1")}, {Key: []byte("key 2"), Value: []byte("value 2")}, }, }, }, }, }, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if !reflect.DeepEqual(resp, resp1) { t.Fatalf("expected different response %#v", resp) } resp, err = conn.Produce(&proto.ProduceReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Compression: proto.CompressionNone, RequiredAcks: proto.RequiredAcksAll, Timeout: time.Second, Topics: []proto.ProduceReqTopic{ { Name: "first", Partitions: []proto.ProduceReqPartition{ { ID: 0, Messages: []*proto.Message{ {Key: []byte("key"), Value: []byte("value")}, }, }, }, }, }, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if !reflect.DeepEqual(resp, resp2) { t.Fatalf("expected different response %#v", resp) } if err := conn.Close(); err != nil { t.Fatalf("could not close kafka connection: %s", err) } if err := ln.Close(); err != nil { t.Fatalf("could not close test server: %s", err) } } func TestConnectionFetch(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } messages := []*proto.Message{ {Offset: 4, Key: []byte("f"), Value: []byte("first"), TipOffset: 20}, {Offset: 5, Key: []byte("s"), Value: []byte("second"), TipOffset: 20}, {Offset: 6, Key: []byte("t"), Value: []byte("third"), TipOffset: 20}, } for _, m := range messages { m.Crc = proto.ComputeCrc(m, proto.CompressionNone) } resp1 := &proto.FetchResp{ CorrelationID: 2, Topics: []proto.FetchRespTopic{ { Name: "foo", Partitions: []proto.FetchRespPartition{ { ID: 1, Err: nil, TipOffset: 20, Messages: messages, }, }, }, { Name: "bar", Partitions: []proto.FetchRespPartition{ { ID: 6, Err: proto.ErrUnknownTopicOrPartition, TipOffset: -1, Messages: nil, }, }, }, }, } ln, ch, err := testServer2() if err != nil { t.Fatalf("test server error: %s", err) } go func() { time.Sleep(50 * time.Millisecond) ch <- versionResp }() conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } go func() { time.Sleep(50 * time.Millisecond) ch <- resp1 }() resp, err := conn.Fetch(&proto.FetchReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Topics: []proto.FetchReqTopic{ { Name: "foo", Partitions: []proto.FetchReqPartition{ { ID: 1, FetchOffset: 5, }, }, }, { Name: "bar", Partitions: []proto.FetchReqPartition{ { ID: 6, }, }, }, }, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } // before comparison, set attributes as we expect deserializer to do for _, m := range messages { m.Topic = "foo" m.Partition = 1 } // offset 5 was requested; first message should be trimmed resp1.Topics[0].Partitions[0].Messages = messages[1:] if !reflect.DeepEqual(resp, resp1) { t.Fatalf("expected different response %#v", resp) } } func TestTrimRecordBatches(t *testing.T) { resp := &proto.FetchResp{ CorrelationID: 2, Topics: []proto.FetchRespTopic{ { Name: "foo", Partitions: []proto.FetchRespPartition{{ ID: 1, Err: nil, TipOffset: 20, MessageVersion: 2, RecordBatches: []*proto.RecordBatch{{ FirstOffset: 4, Records: []*proto.Record{{ OffsetDelta: 0, Key: []byte("f"), Value: []byte("first"), }, { OffsetDelta: 1, Key: []byte("s"), Value: 
[]byte("second"), }}, }, { FirstOffset: 6, Records: []*proto.Record{{ OffsetDelta: 0, Key: []byte("t"), Value: []byte("third"), }}, }}, }}, }, { Name: "bar", Partitions: []proto.FetchRespPartition{{ ID: 6, Err: proto.ErrUnknownTopicOrPartition, TipOffset: -1, MessageVersion: 2, RecordBatches: nil, }}, }, }, } req := &proto.FetchReq{ Topics: []proto.FetchReqTopic{ { Name: "foo", Partitions: []proto.FetchReqPartition{ { ID: 1, FetchOffset: 5, }, }, }, { Name: "bar", Partitions: []proto.FetchReqPartition{ { ID: 6, }, }, }, }, } trimLeadingMessages(req, resp) rbs := resp.Topics[0].Partitions[0].RecordBatches if got, exp := len(rbs[0].Records), 1; got != exp { t.Fatalf("got %d records in the first batch but expected %d", got, exp) } if got, exp := string(rbs[0].Records[0].Key), "s"; got != exp { t.Fatalf("got first key %q but expected %q", got, exp) } if got, exp := len(rbs[1].Records), 1; got != exp { t.Fatalf("got %d records in the second batch but expected %d", got, exp) } if got, exp := string(rbs[1].Records[0].Key), "t"; got != exp { t.Fatalf("got first key %q but expected %q", got, exp) } } func TestConnectionOffset(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } resp1 := &proto.OffsetResp{ CorrelationID: 2, Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 0, Offsets: []int64{92, 0}, }, }, }, }, } ln, ch, err := testServer2() if err != nil { t.Fatalf("test server error: %s", err) } go func() { time.Sleep(50 * time.Millisecond) ch <- versionResp }() conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } go func() { time.Sleep(50 * time.Millisecond) ch <- resp1 }() resp, err := conn.Offset(&proto.OffsetReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Topics: []proto.OffsetReqTopic{ { Name: "test", Partitions: []proto.OffsetReqPartition{ { ID: 0, TimeMs: -2, MaxOffsets: 2, }, }, }, }, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if !reflect.DeepEqual(resp, resp1) { t.Fatalf("expected different response %#v", resp) } } func TestOffsetResponseWithVersions(t *testing.T) { resp0 := proto.OffsetResp{ CorrelationID: 2, Topics: []proto.OffsetRespTopic{ { Name: "test", Partitions: []proto.OffsetRespPartition{ { ID: 0, Offsets: []int64{92, 0}, }, }, }, }, } b, err := resp0.Bytes() if err != nil { t.Fatal(err) } r0, err := proto.ReadVersionedOffsetResp(bytes.NewReader(b), resp0.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(resp0, *r0) { t.Fatalf("expected different response \n %#v expected \n %#v", resp0, r0) } resp1 := resp0 resp1.Version = proto.KafkaV1 ts := time.Unix(0, (time.Now().UnixNano()/int64(time.Millisecond))*int64(time.Millisecond)) resp1.Topics[0].Partitions[0].TimeStamp = ts // In kafka >= KafkaV1 there might be only one offset resp1.Topics[0].Partitions[0].Offsets = []int64{92} b1, err := resp1.Bytes() if err != nil { t.Fatal(err) } r1, err := proto.ReadVersionedOffsetResp(bytes.NewReader(b1), resp1.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(resp1, *r1) { t.Fatalf("expected different response \n %#v expected \n %#v", resp1, *r1) } resp2 := resp1 resp2.Version = proto.KafkaV2 resp2.ThrottleTime = 2 * time.Second b2, err := resp2.Bytes() if err != nil { t.Fatal(err) } r2, err := proto.ReadVersionedOffsetResp(bytes.NewReader(b2), resp2.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(resp2, *r2) { t.Fatalf("expected different response \n %#v 
expected \n %#v", resp2, *r2) } } func TestConnectionProduceNoAck(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } ln, ch, err := testServer2() if err != nil { t.Fatalf("test server error: %s", err) } go func() { time.Sleep(50 * time.Millisecond) ch <- versionResp }() conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } resp, err := conn.Produce(&proto.ProduceReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Compression: proto.CompressionNone, RequiredAcks: proto.RequiredAcksNone, Timeout: time.Second, Topics: []proto.ProduceReqTopic{ { Name: "first", Partitions: []proto.ProduceReqPartition{ { ID: 0, Messages: []*proto.Message{ {Key: []byte("key 1"), Value: []byte("value 1")}, {Key: []byte("key 2"), Value: []byte("value 2")}, }, }, }, }, }, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if resp != nil { t.Fatalf("expected no response, got %#v", resp) } if err := conn.Close(); err != nil { t.Fatalf("could not close kafka connection: %s", err) } if err := ln.Close(); err != nil { t.Fatalf("could not close test server: %s", err) } } func TestClosedConnectionWriter(t *testing.T) { // create test server with no messages, so that any client connection will // be immediately closed ln, err := testServer() if err != nil { t.Fatalf("test server error: %s", err) } conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } longBytes := []byte(strings.Repeat("xxxxxxxxxxxxxxxxxxxxxx", 1000)) req := proto.ProduceReq{ RequestHeader: proto.RequestHeader{ClientID: "test-client"}, Compression: proto.CompressionNone, RequiredAcks: proto.RequiredAcksAll, Timeout: 100, Topics: []proto.ProduceReqTopic{ { Name: "test-topic", Partitions: []proto.ProduceReqPartition{ { ID: 0, Messages: []*proto.Message{ {Value: longBytes}, }, }, }, }, }, } for i := 0; i < 10; i++ { if _, err := conn.Produce(&req); err == nil { t.Fatal("message publishing after closing connection should not be possible") } } // although we produced ten requests, because connection is closed, no // response channel should be registered conn.mu.Lock() defer conn.mu.Unlock() if len(conn.respc) != 0 { t.Fatalf("expected 0 waiting responses, got %d", len(conn.respc)) } } func TestClosedConnectionReader(t *testing.T) { // create test server with no messages, so that any client connection will // be immediately closed ln, err := testServer() if err != nil { t.Fatalf("test server error: %s", err) } conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } req := &proto.FetchReq{ RequestHeader: proto.RequestHeader{ClientID: "test-client"}, MaxWaitTime: 100, MinBytes: 0, Topics: []proto.FetchReqTopic{ { Name: "my-topic", Partitions: []proto.FetchReqPartition{ { ID: 0, FetchOffset: 1, MaxBytes: 100000, }, }, }, }, } for i := 0; i < 10; i++ { if _, err := conn.Fetch(req); err == nil { t.Fatal("fetching from closed connection succeeded") } } // although we produced ten requests, because connection is closed, no // response channel should be registered conn.mu.Lock() defer conn.mu.Unlock() if len(conn.respc) != 0 { t.Fatalf("expected 0 waiting responses, got %d", len(conn.respc)) } } func TestConnectionReaderAfterEOF(t *testing.T) { ln, err := testServer3() if err != nil { t.Fatalf("test server error: %s", err) } defer func() { _ = 
ln.Close() }() conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } req := &proto.FetchReq{ RequestHeader: proto.RequestHeader{ClientID: "test-client"}, MaxWaitTime: 100, MinBytes: 0, Topics: []proto.FetchReqTopic{ { Name: "my-topic", Partitions: []proto.FetchReqPartition{ { ID: 0, FetchOffset: 1, MaxBytes: 100000, }, }, }, }, } if _, err := conn.Fetch(req); err == nil { t.Fatal("fetching from closed connection succeeded") } // Wait until testServer3 closes connection time.Sleep(time.Millisecond * 50) if _, err := conn.Fetch(req); err == nil { t.Fatal("fetching from closed connection succeeded") } } func TestNoServerResponse(t *testing.T) { ln, err := testSilentServer() if err != nil { t.Fatalf("test server error: %s", err) } conn, err := newTCPConnection(ln.Addr().String(), time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } _, err = conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Topics: []string{"first", "second"}, }) if err == nil { t.Fatalf("expected timeout error, did not happen") } if err := conn.Close(); err != nil { t.Fatalf("could not close kafka connection: %s", err) } if err := ln.Close(); err != nil { t.Fatalf("could not close test server: %s", err) } } func TestTLSConnection(t *testing.T) { versionResp := &proto.APIVersionsResp{ CorrelationID: 1, } resp1 := &proto.MetadataResp{ CorrelationID: 2, Brokers: []proto.MetadataRespBroker{ { NodeID: 666, Host: "example.com", Port: 999, }, }, Topics: []proto.MetadataRespTopic{ { Name: "foo", Partitions: []proto.MetadataRespPartition{ { ID: 7, Leader: 7, Replicas: []int32{7}, Isrs: []int32{7}, }, }, }, }, } ln, err := testTLSServer(versionResp, resp1) if err != nil { t.Fatalf("test server error: %s", err) } tlsConf, err := getTLSConf() if err != nil { t.Fatalf("cannot get tls parametes: %s", err) } _ = tlsConf conn, err := newTLSConnection(ln.Addr().String(), tlsConf.ca, tlsConf.cert, tlsConf.key, time.Second, time.Second) if err != nil { t.Fatalf("could not connect to test server: %s", err) } resp, err := conn.Metadata(&proto.MetadataReq{ RequestHeader: proto.RequestHeader{ClientID: "tester"}, Topics: []string{"first", "second"}, }) if err != nil { t.Fatalf("could not fetch response: %s", err) } if !reflect.DeepEqual(resp, resp1) { t.Fatalf("expected different response %#v", resp) } if err := conn.Close(); err != nil { t.Fatalf("could not close kafka connection: %s", err) } if err := ln.Close(); err != nil { t.Fatalf("could not close test server: %s", err) } } kafka-2.1.1/v2/distributing_producer.go000066400000000000000000000114061356004474300200700ustar00rootroot00000000000000package kafka import ( "errors" "fmt" "hash/fnv" "math/rand" "sync" "time" "github.com/optiopay/kafka/v2/proto" ) // DistributingProducer is the interface similar to Producer, but never require // to explicitly specify partition. // // Distribute writes messages to the given topic, automatically choosing // partition, returning the post-commit offset and any error encountered. The // offset of each message is also updated accordingly. 
type DistributingProducer interface { Distribute(topic string, messages ...*proto.Message) (offset int64, err error) } type randomProducer struct { producer Producer partitions int32 rand saferand } // custom math/rand randomizer is not concurrency safe type saferand struct { mu sync.Mutex r *rand.Rand } func (sr *saferand) Intn(n int) int { sr.mu.Lock() res := sr.r.Intn(n) sr.mu.Unlock() return res } // NewRandomProducer wraps given producer and return DistributingProducer that // publish messages to kafka, randomly picking partition number from range // [0, numPartitions) func NewRandomProducer(p Producer, numPartitions int32) DistributingProducer { return &randomProducer{ rand: saferand{r: rand.New(rand.NewSource(time.Now().UnixNano()))}, producer: p, partitions: numPartitions, } } // Distribute write messages to given kafka topic, randomly destination choosing // partition. All messages written within single Produce call are atomically // written to the same destination. func (p *randomProducer) Distribute(topic string, messages ...*proto.Message) (offset int64, err error) { // In the case there are no partitions, which may happen for new topics // when AllowTopicCreation is passed, we will write to partition 0 // since rand.Intn panics with 0 part := 0 if p.partitions > 0 { part = p.rand.Intn(int(p.partitions)) } return p.producer.Produce(topic, int32(part), messages...) } type roundRobinProducer struct { producer Producer partitions int32 mu sync.Mutex next int32 } // NewRoundRobinProducer wraps given producer and return DistributingProducer // that publish messages to kafka, choosing destination partition from cycle // build from [0, numPartitions) range. func NewRoundRobinProducer(p Producer, numPartitions int32) DistributingProducer { return &roundRobinProducer{ producer: p, partitions: numPartitions, next: 0, } } // Distribute write messages to given kafka topic, choosing next destination // partition from internal cycle. All messages written within single Produce // call are atomically written to the same destination. func (p *roundRobinProducer) Distribute(topic string, messages ...*proto.Message) (offset int64, err error) { p.mu.Lock() part := p.next p.next++ if p.next >= p.partitions { p.next = 0 } p.mu.Unlock() return p.producer.Produce(topic, int32(part), messages...) } type hashProducer struct { producer Producer partitions int32 } // NewHashProducer wraps given producer and return DistributingProducer that // publish messages to kafka, computing partition number from message key hash, // using fnv hash and [0, numPartitions) range. func NewHashProducer(p Producer, numPartitions int32) DistributingProducer { return &hashProducer{ producer: p, partitions: numPartitions, } } // Distribute write messages to given kafka topic, computing partition number from // the message key value. Message key must be not nil and all messages written // within single Produce call are atomically written to the same destination. // // All messages passed within single Produce call must hash to the same // destination, otherwise no message is written and error is returned. 
func (p *hashProducer) Distribute(topic string, messages ...*proto.Message) (offset int64, err error) { if len(messages) == 0 { return 0, errors.New("no messages") } part, err := messageHashPartition(messages[0].Key, p.partitions) if err != nil { return 0, fmt.Errorf("cannot hash message: %s", err) } // make sure that all messages within single call are to the same destination for i := 2; i < len(messages); i++ { mp, err := messageHashPartition(messages[i].Key, p.partitions) if err != nil { return 0, fmt.Errorf("cannot hash message: %s", err) } if part != mp { return 0, errors.New("cannot publish messages to different destinations") } } return p.producer.Produce(topic, part, messages...) } // messageHashPartition compute destination partition number for given key // value and total number of partitions. func messageHashPartition(key []byte, partitions int32) (int32, error) { if key == nil { return 0, errors.New("no key") } hasher := fnv.New32a() if _, err := hasher.Write(key); err != nil { return 0, fmt.Errorf("cannot hash key: %s", err) } sum := int32(hasher.Sum32()) if sum < 0 { sum = -sum } return sum % partitions, nil } kafka-2.1.1/v2/distributing_producer_test.go000066400000000000000000000064541356004474300211360ustar00rootroot00000000000000package kafka import ( "fmt" "sync" "testing" "github.com/optiopay/kafka/v2/proto" ) type recordingProducer struct { sync.Mutex msgs []*proto.Message } func newRecordingProducer() *recordingProducer { return &recordingProducer{msgs: make([]*proto.Message, 0)} } func (p *recordingProducer) Produce(topic string, part int32, msgs ...*proto.Message) (int64, error) { p.Lock() defer p.Unlock() offset := len(p.msgs) p.msgs = append(p.msgs, msgs...) for i, msg := range msgs { msg.Offset = int64(offset + i) msg.Topic = topic msg.Partition = part } return int64(len(p.msgs)), nil } func TestRoundRobinProducer(t *testing.T) { rec := newRecordingProducer() p := NewRoundRobinProducer(rec, 3) data := [][][]byte{ { []byte("a 1"), []byte("a 2"), }, { []byte("b 1"), }, { []byte("c 1"), []byte("c 2"), []byte("c 3"), }, { []byte("d 1"), }, } for i, values := range data { msgs := make([]*proto.Message, len(values)) for i, value := range values { msgs[i] = &proto.Message{Value: value} } if _, err := p.Distribute("test-topic", msgs...); err != nil { t.Errorf("cannot distribute %d message: %s", i, err) } } // a, [0, 1] if rec.msgs[0].Partition != 0 || rec.msgs[1].Partition != 0 { t.Fatalf("expected partition 0, got %d and %d", rec.msgs[0].Partition, rec.msgs[1].Partition) } // b, [2] if rec.msgs[2].Partition != 1 { t.Fatalf("expected partition 1, got %d", rec.msgs[2].Partition) } // c, [3, 4, 5] if rec.msgs[3].Partition != 2 || rec.msgs[4].Partition != 2 { t.Fatalf("expected partition 2, got %d and %d", rec.msgs[3].Partition, rec.msgs[3].Partition) } // d, [6] if rec.msgs[6].Partition != 0 { t.Fatalf("expected partition 0, got %d", rec.msgs[6].Partition) } } func TestHashProducer(t *testing.T) { const parts = 3 rec := newRecordingProducer() p := NewHashProducer(rec, parts) var keys [][]byte for i := 0; i < 30; i++ { keys = append(keys, []byte(fmt.Sprintf("key-%d", i))) } for i, key := range keys { msg := &proto.Message{Key: key} if _, err := p.Distribute("test-topic", msg); err != nil { t.Errorf("cannot distribute %d message: %s", i, err) } } if len(rec.msgs) != len(keys) { t.Fatalf("expected %d messages, got %d", len(keys), len(rec.msgs)) } for i, key := range keys { want, err := messageHashPartition(key, parts) if err != nil { t.Errorf("cannot compute hash: %s", err) 
continue } if got := rec.msgs[i].Partition; want != got { t.Errorf("expected partition %d, got %d", want, got) } else if got > parts-1 { t.Errorf("number of partitions is %d, but message written to %d", parts, got) } } } func TestRandomProducerIsConcurrencySafe(t *testing.T) { const workers = 100 p := NewRandomProducer(nullproducer{}, 4) start := make(chan struct{}) var wg sync.WaitGroup wg.Add(workers) // spawn worker, each starting to produce at the same time for i := 0; i < workers; i++ { go func() { defer wg.Done() msg := &proto.Message{Value: []byte("value")} <-start for n := 0; n < 1000; n++ { if _, err := p.Distribute("x", msg); err != nil { t.Errorf("cannot distribute: %s", err) } } }() } close(start) wg.Wait() } type nullproducer struct{} func (nullproducer) Produce(topic string, part int32, msgs ...*proto.Message) (int64, error) { return 0, nil } kafka-2.1.1/v2/doc.go000066400000000000000000000004721356004474300142240ustar00rootroot00000000000000/* Package kafka a provides high level client API for Apache Kafka. Use 'Broker' for node connection management, 'Producer' for sending messages, and 'Consumer' for fetching. All those structures implement Client, Consumer and Producer interface, that is also implemented in kafkatest package. */ package kafka kafka-2.1.1/v2/example_test.go000066400000000000000000000055651356004474300161610ustar00rootroot00000000000000package kafka import ( "fmt" "github.com/optiopay/kafka/v2/proto" ) func ExampleConsumer() { // connect to kafka cluster addresses := []string{"localhost:9092", "localhost:9093"} broker, err := Dial(addresses, NewBrokerConf("test")) if err != nil { panic(err) } defer broker.Close() // create new consumer conf := NewConsumerConf("my-messages", 0) conf.StartOffset = StartOffsetNewest consumer, err := broker.Consumer(conf) if err != nil { panic(err) } // read all messages for { msg, err := consumer.Consume() if err != nil { if err == ErrNoData { break } panic(err) } fmt.Printf("message: %#v", msg) } } func ExampleOffsetCoordinator() { // connect to kafka cluster addresses := []string{"localhost:9092", "localhost:9093"} broker, err := Dial(addresses, NewBrokerConf("test")) if err != nil { panic(err) } defer broker.Close() // create offset coordinator and customize configuration conf := NewOffsetCoordinatorConf("my-consumer-group") conf.RetryErrLimit = 20 coordinator, err := broker.OffsetCoordinator(conf) if err != nil { panic(err) } // write consumed message offset for topic/partition if err := coordinator.Commit("my-topic", 0, 12); err != nil { panic(err) } // get latest consumed offset for given topic/partition off, _, err := coordinator.Offset("my-topic", 0) if err != nil { panic(err) } if off != 12 { panic(fmt.Sprintf("offset is %d, not 12", off)) } } func ExampleProducer() { // connect to kafka cluster addresses := []string{"localhost:9092", "localhost:9093"} broker, err := Dial(addresses, NewBrokerConf("test")) if err != nil { panic(err) } defer broker.Close() // create new producer conf := NewProducerConf() conf.RequiredAcks = proto.RequiredAcksLocal // write two messages to kafka using single call to make it atomic producer := broker.Producer(conf) messages := []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, } if _, err := producer.Produce("my-messages", 0, messages...); err != nil { panic(err) } } func ExampleMerge() { // connect to kafka cluster addresses := []string{"localhost:9092", "localhost:9093"} broker, err := Dial(addresses, NewBrokerConf("test")) if err != nil { panic(err) } defer 
broker.Close() topics := []string{"fruits", "vegetables"} fetchers := make([]Consumer, len(topics)) // create consumers for different topics for i, topic := range topics { conf := NewConsumerConf(topic, 0) conf.RetryLimit = 20 conf.StartOffset = StartOffsetNewest consumer, err := broker.Consumer(conf) if err != nil { panic(err) } fetchers[i] = consumer } // merge all created consumers (they don't even have to belong to the same broker!) mx := Merge(fetchers...) defer mx.Close() // consume messages from all sources for { msg, err := mx.Consume() if err != nil { panic(err) } fmt.Printf("message: %#v", msg) } } kafka-2.1.1/v2/go.mod000066400000000000000000000002051356004474300142300ustar00rootroot00000000000000module github.com/optiopay/kafka/v2 go 1.13 require ( github.com/fsouza/go-dockerclient v1.4.4 github.com/golang/snappy v0.0.1 ) kafka-2.1.1/v2/go.sum000066400000000000000000000203251356004474300142620ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04= github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ= golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= kafka-2.1.1/v2/integration/000077500000000000000000000000001356004474300154505ustar00rootroot00000000000000kafka-2.1.1/v2/integration/README.md000066400000000000000000000006461356004474300167350ustar00rootroot00000000000000# integration Integration with kafka & zoopeeker test helpers. `KafkaCluster` depends on `docker` and `docker-compose` commands. **IMPORTANT**: Make sure to update `KAFKA_ADVERTISED_HOST_NAME` in `kafka-docker/docker-compose.yml` before running tests. ## kafka-docker [kafka-docker](/integration/kafka-docker) directory is copy of [wurstmeister/kafka-docker](https://github.com/wurstmeister/kafka-docker) repository. 
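## Example

A minimal sketch of how the helpers in this package are wired together in `broker_test.go`: start the docker-compose based cluster, wait until it is ready, dial a broker, and tear everything down. It only reuses calls that already appear in the tests below (`IntegrationTest`, `NewKafkaCluster`, `Start`, `WaitUntilReady`, `KafkaAddrs`, `Stop`) and is not an additional API.

```go
package integration

import (
	"testing"

	"github.com/optiopay/kafka/v2"
)

// TestClusterSketch shows the usual setup/teardown sequence for an
// integration test against a local docker-compose Kafka cluster.
func TestClusterSketch(t *testing.T) {
	// Skip unless integration testing is enabled.
	IntegrationTest(t)

	// Boot a cluster with 4 Kafka containers from the kafka-docker compose file.
	cluster := NewKafkaCluster("kafka-docker/", 4)
	if err := cluster.Start(); err != nil {
		t.Fatalf("cannot start kafka cluster: %s", err)
	}
	defer func() { _ = cluster.Stop() }()

	// Block until the brokers (and the test topics) are ready.
	if err := cluster.WaitUntilReady(); err != nil {
		t.Fatal(err)
	}

	// Resolve the advertised broker addresses and connect a client.
	addrs, err := cluster.KafkaAddrs()
	if err != nil {
		t.Fatalf("cannot get kafka addresses: %s", err)
	}
	broker, err := kafka.Dial(addrs, kafka.NewBrokerConf("integration-sketch"))
	if err != nil {
		t.Fatalf("cannot connect to cluster (%q): %s", addrs, err)
	}
	defer broker.Close()

	// Produce and consume against `broker` here, as the tests below do.
}
```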
kafka-2.1.1/v2/integration/broker_test.go000066400000000000000000000277061356004474300203360ustar00rootroot00000000000000package integration import ( "reflect" "strings" "testing" "time" "github.com/optiopay/kafka/v2" "github.com/optiopay/kafka/v2/proto" ) func TestProduceAndConsume(t *testing.T) { IntegrationTest(t) topics := []string{"Topic3", "Topic4"} cluster := NewKafkaCluster("kafka-docker/", 4) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } defer func() { _ = cluster.Stop() }() if err := cluster.WaitUntilReady(); err != nil { t.Fatal(err) } bconf := kafka.NewBrokerConf("producer-broken-connection") bconf.Logger = &testLogger{t} addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { t.Fatalf("cannot connect to cluster (%q): %s", addrs, err) } defer broker.Close() // produce big message to enforce TCP buffer flush m := proto.Message{ //Value: []byte(strings.Repeat("producer broken connection message ", 1000)), Value: []byte("hello world"), } pconf := kafka.NewProducerConf() producer := broker.Producer(pconf) // send message to all topics to make sure it's working for _, name := range topics { if _, err := producer.Produce(name, 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", name, err) } } time.Sleep(5 * time.Second) // make sure data was persisted for _, name := range topics { consumer, err := broker.Consumer(kafka.NewConsumerConf(name, 0)) if err != nil { t.Errorf("cannot create consumer for %q: %s", name, err) continue } m1, err := consumer.Consume() if err != nil { t.Errorf("cannot consume %d message from %q: %s", 0, name, err) } else { if !reflect.DeepEqual(m.Value, m1.Value) { t.Errorf("Got different message. Wait:\n%#+v got:\n%#+v", m.Value, m1.Value) } } } // check if offsets are correct for _, name := range topics { offe, err := broker.OffsetEarliest(name, 0) if err != nil { t.Fatal(err) } if offe != 0 { t.Fatalf("Should get OffsetEarliest == 0 Got %#v instead ", offe) } offl, err := broker.OffsetLatest(name, 0) if err != nil { t.Fatal(err) } if offl != 1 { t.Fatalf("Should get OffsetLatest == 1. 
Got %#v instead ", offl) } } } func TestCompression(t *testing.T) { IntegrationTest(t) topics := []string{"Topic3", "Topic4"} cluster := NewKafkaCluster("kafka-docker/", 4) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } defer func() { _ = cluster.Stop() }() if err := cluster.WaitUntilReady(); err != nil { t.Fatal(err) } bconf := kafka.NewBrokerConf("producer-broken-connection") bconf.Logger = &testLogger{t} addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { t.Fatalf("cannot connect to cluster (%q): %s", addrs, err) } defer broker.Close() // produce big message to enforce TCP buffer flush m := proto.Message{ //Value: []byte(strings.Repeat("producer broken connection message ", 1000)), Value: []byte("hello world"), } // Use GZIP compression for Topic3 pconf := kafka.NewProducerConf() pconf.Compression = proto.CompressionGzip producer := broker.Producer(pconf) // send message to all topics to make sure it's working //for i := 0; i < 2; i++ { if _, err := producer.Produce(topics[0], 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", topics[0], err) } // Use Snappy compression for Topic4 pconf = kafka.NewProducerConf() pconf.Compression = proto.CompressionSnappy producer = broker.Producer(pconf) if _, err := producer.Produce(topics[1], 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", topics[1], err) } time.Sleep(5 * time.Second) // make sure data was persisted for _, name := range topics { consumer, err := broker.Consumer(kafka.NewConsumerConf(name, 0)) if err != nil { t.Errorf("cannot create consumer for %q: %s", name, err) continue } //for i := 0; i < 2; i++ { m1, err := consumer.Consume() if err != nil { t.Errorf("cannot consume %d message from %q: %s", 0, name, err) } else { if !reflect.DeepEqual(m.Value, m1.Value) { t.Errorf("Got different message. 
Wait:\n%#+v got:\n%#+v", m.Value, m1.Value) } } } } func TestProducerBrokenConnection(t *testing.T) { IntegrationTest(t) topics := []string{"Topic3", "Topic4"} cluster := NewKafkaCluster("kafka-docker/", 4) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } defer func() { _ = cluster.Stop() }() if err := cluster.WaitUntilReady(); err != nil { t.Fatal(err) } bconf := kafka.NewBrokerConf("producer-broken-connection") bconf.Logger = &testLogger{t} addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { t.Fatalf("cannot connect to cluster (%q): %s", addrs, err) } defer broker.Close() // produce big message to enforce TCP buffer flush m := proto.Message{ Value: []byte(strings.Repeat("producer broken connection message ", 1000)), } pconf := kafka.NewProducerConf() producer := broker.Producer(pconf) // send message to all topics to make sure it's working for _, name := range topics { if _, err := producer.Produce(name, 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", name, err) } } // close two kafka clusters and publish to all 3 topics - 2 of them should // retry sending, because lack of leader makes the request fail // // request should not succeed until nodes are back - bring them back after // small delay and make sure producing was successful containers, err := cluster.Containers() if err != nil { t.Fatalf("cannot get containers: %s", err) } var stopped []*Container for _, container := range containers { if container.RunningKafka() { if err := container.Kill(); err != nil { t.Fatalf("cannot kill %q kafka container: %s", container.ID, err) } stopped = append(stopped, container) } if len(stopped) == 2 { break } } // bring stopped containers back errc := make(chan error) go func() { time.Sleep(500 * time.Millisecond) for _, container := range stopped { if err := container.Start(); err != nil { errc <- err } } close(errc) }() // send message to all topics to make sure it's working for _, name := range topics { if _, err := producer.Produce(name, 0, &m); err != nil { t.Errorf("cannot produce to %q: %s", name, err) } } for err := range errc { t.Errorf("cannot start container: %s", err) } // make sure data was persisted for _, name := range topics { consumer, err := broker.Consumer(kafka.NewConsumerConf(name, 0)) if err != nil { t.Errorf("cannot create consumer for %q: %s", name, err) continue } for i := 0; i < 2; i++ { if _, err := consumer.Consume(); err != nil { t.Errorf("cannot consume %d message from %q: %s", i, name, err) } } } } func TestConsumerBrokenConnection(t *testing.T) { IntegrationTest(t) const msgPerTopic = 10 topics := []string{"Topic3", "Topic4"} cluster := NewKafkaCluster("kafka-docker/", 5) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } defer func() { _ = cluster.Stop() }() if err := cluster.WaitUntilReady(); err != nil { t.Fatal(err) } bconf := kafka.NewBrokerConf("producer-broken-connection") bconf.Logger = &testLogger{t} addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { t.Fatalf("cannot connect to cluster (%q): %s", addrs, err) } defer broker.Close() // produce big message to enforce TCP buffer flush m := proto.Message{ Value: []byte(strings.Repeat("consumer broken connection message ", 1000)), } pconf := kafka.NewProducerConf() producer := broker.Producer(pconf) // send message to all 
topics for _, name := range topics { for i := 0; i < msgPerTopic; i++ { if _, err := producer.Produce(name, 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", name, err) } } } // close two kafka clusters and publish to all 3 topics - 2 of them should // retry sending, because lack of leader makes the request fail // // request should not succeed until nodes are back - bring them back after // small delay and make sure producing was successful containers, err := cluster.Containers() if err != nil { t.Fatalf("cannot get containers: %s", err) } var stopped []*Container for _, container := range containers { if container.RunningKafka() { if err := container.Kill(); err != nil { t.Fatalf("cannot kill %q kafka container: %s", container.ID, err) } stopped = append(stopped, container) } if len(stopped) == 2 { break } } // bring stopped containers back errc := make(chan error) go func() { time.Sleep(500 * time.Millisecond) for _, container := range stopped { if err := container.Start(); err != nil { errc <- err } } close(errc) }() time.Sleep(5 * time.Second) // make sure data was persisted for _, name := range topics { consumer, err := broker.Consumer(kafka.NewConsumerConf(name, 0)) if err != nil { t.Errorf("cannot create consumer for %q: %s", name, err) continue } for i := 0; i < msgPerTopic; i++ { if _, err := consumer.Consume(); err != nil { t.Errorf("cannot consume %d message from %q: %s", i, name, err) } } } for err := range errc { t.Errorf("cannot start container: %s", err) } } func TestNewTopic(t *testing.T) { IntegrationTest(t) const msgPerTopic = 10 topic := "NewTopic" cluster := NewKafkaCluster("kafka-docker/", 1) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } defer func() { _ = cluster.Stop() }() // We cannot use here cluster.WaitUntilReady() because // it waits until topics will be created. But in this // tests we create cluster with 1 cluster node only, // which means that topic creation will fail // because they require 4 partitions // So we can only wait and hope that 10 seconds is enough time.Sleep(10 * time.Second) //time.Sleep(5 * time.Second) // go func() { // logCmd := cluster.cmd("docker-compose", "logs") // if err := logCmd.Run(); err != nil { // panic(err) // } // }() bconf := kafka.NewBrokerConf("producer-new-topic") bconf.Logger = &testLogger{t} addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { t.Fatalf("cannot connect to cluster (%q): %s", addrs, err) } defer broker.Close() topicInfo := proto.TopicInfo{ Topic: topic, NumPartitions: 1, ReplicationFactor: 1, } resp, err := broker.CreateTopic([]proto.TopicInfo{topicInfo}, 10*time.Second, false) if err != nil { t.Fatal(err) } for _, e := range resp.TopicErrors { if e.ErrorCode != 0 { t.Fatalf("Got error on topic creation %#+v", e) } } m := proto.Message{ Value: []byte("Test message"), } pconf := kafka.NewProducerConf() producer := broker.Producer(pconf) if _, err := producer.Produce(topic, 0, &m); err != nil { t.Fatalf("cannot produce to %q: %s", topic, err) } consumer, err := broker.Consumer(kafka.NewConsumerConf(topic, 0)) if err != nil { t.Fatalf("cannot create consumer for %q: %s", topic, err) } if _, err := consumer.Consume(); err != nil { t.Errorf("cannot consume message from %q: %s", topic, err) } } type testLogger struct { *testing.T } func (tlog *testLogger) Debug(msg string, keyvals ...interface{}) { args := append([]interface{}{msg}, keyvals...) tlog.Log(args...) 
} func (tlog *testLogger) Info(msg string, keyvals ...interface{}) { args := append([]interface{}{msg}, keyvals...) tlog.Log(args...) } func (tlog *testLogger) Warn(msg string, keyvals ...interface{}) { args := append([]interface{}{msg}, keyvals...) tlog.Log(args...) } func (tlog *testLogger) Error(msg string, keyvals ...interface{}) { args := append([]interface{}{msg}, keyvals...) tlog.Log(args...) } kafka-2.1.1/v2/integration/cluster.go000066400000000000000000000145461356004474300174720ustar00rootroot00000000000000package integration import ( "bytes" "fmt" "os/exec" "strings" "sync" "time" "github.com/fsouza/go-dockerclient" "github.com/optiopay/kafka/v2" "testing" ) type KafkaCluster struct { // cluster size == number of kafka nodes size int kafkaDockerDir string verbose bool mu sync.Mutex containers []*Container } type Container struct { cluster *KafkaCluster *docker.Container } func NewKafkaCluster(kafkaDockerDir string, size int) *KafkaCluster { if size != 1 && size < 4 { fmt.Println("WARNING: creating cluster smaller than 4 nodes is not sufficient for all topics") } return &KafkaCluster{ kafkaDockerDir: kafkaDockerDir, size: size, verbose: testing.Verbose(), } } // RunningKafka returns true if container is running kafka node func (c *Container) RunningKafka() bool { return c.Config.Image == "kafkadocker_kafka" } // Start starts current container func (c *Container) Start() error { return c.cluster.ContainerStart(c.ID) } // Stop stops current container func (c *Container) Stop() error { return c.cluster.ContainerStop(c.ID) } func (c *Container) Kill() error { return c.cluster.ContainerKill(c.ID) } // Start start zookeeper and kafka nodes using docker-compose command. Upon // successful process spawn, cluster is scaled to required amount of nodes. func (cluster *KafkaCluster) Start() error { cluster.mu.Lock() defer cluster.mu.Unlock() // ensure cluster is not running if err := cluster.Stop(); err != nil { return fmt.Errorf("cannot ensure stop cluster: %s", err) } if err := cluster.removeStoppedContainers(); err != nil { return fmt.Errorf("cannot cleanup dead containers: %s", err) } args := []string{"--no-ansi", "up", "-d", "--scale", fmt.Sprintf("kafka=%d", cluster.size)} upCmd, _, stderr := cluster.cmd("docker-compose", args...) err := upCmd.Run() if err != nil { return fmt.Errorf("docker-compose error: %s, %s", err, stderr) } containers, err := cluster.Containers() if err != nil { _ = cluster.Stop() return fmt.Errorf("cannot get containers info: %s", err) } cluster.containers = containers return nil } func (cluster *KafkaCluster) WaitUntilReady() error { for { bconf := kafka.NewBrokerConf("waiter") addrs, err := cluster.KafkaAddrs() if err != nil { return fmt.Errorf("cannot get kafka address: %s", err) } broker, err := kafka.Dial(addrs, bconf) if err != nil { return fmt.Errorf("cannot connect to cluster (%q): %s", addrs, err) } met, err := broker.Metadata() if err != nil { return fmt.Errorf("Cannot get metadata : %s", err) } if len(met.Topics) > 0 { return nil } broker.Close() time.Sleep(time.Second) } } // Containers inspect all containers running within cluster and return // information about them. 
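//
// A minimal sketch of how the integration tests above use it (run inside a
// test with a started cluster and a t *testing.T in scope):
//
//	containers, err := cluster.Containers()
//	if err != nil {
//		t.Fatalf("cannot get containers: %s", err)
//	}
//	for _, container := range containers {
//		if container.RunningKafka() {
//			// kill one kafka node to simulate a broker outage
//			if err := container.Kill(); err != nil {
//				t.Fatalf("cannot kill %q container: %s", container.ID, err)
//			}
//			break
//		}
//	}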
func (cluster *KafkaCluster) Containers() ([]*Container, error) { psCmd, stdout, stderr := cluster.cmd("docker-compose", "ps", "-q") err := psCmd.Run() if err != nil { return nil, fmt.Errorf("Cannot list processes: %s, %s", err, stderr.String()) } containerIDs := stdout.String() var containers []*Container endpoint := "unix:///var/run/docker.sock" client, err := docker.NewClient(endpoint) if err != nil { return nil, fmt.Errorf("Cannot open connection to docker %s", err) } for _, containerID := range strings.Split(strings.TrimSpace(containerIDs), "\n") { if containerID == "" { continue } container, err := client.InspectContainer(containerID) if err != nil { return nil, fmt.Errorf("Cannot inspect docker container %s", err) } containers = append(containers, &Container{cluster: cluster, Container: container}) } return containers, nil } // Stop stop all services running for the cluster by sending SIGINT to // docker-compose process. func (cluster *KafkaCluster) Stop() error { cmd, _, stderr := cluster.cmd("docker-compose", "stop", "-t", "0") if err := cmd.Run(); err != nil { return fmt.Errorf("docker-compose stop failed: %s, %s", err, stderr) } _ = cluster.removeStoppedContainers() return nil } // KafkaAddrs return list of kafka node addresses as strings, in form // : func (cluster *KafkaCluster) KafkaAddrs() ([]string, error) { containers, err := cluster.Containers() if err != nil { return nil, fmt.Errorf("Cannot get containers info: %s", err) } addrs := make([]string, 0) for _, container := range containers { if _, ok := container.NetworkSettings.Ports["9092/tcp"]; ok { for _, network := range container.NetworkSettings.Networks { if network.IPAddress != "" { addrs = append(addrs, fmt.Sprintf("%s:%s", network.IPAddress, "9092")) break } } } } return addrs, nil } func (cluster *KafkaCluster) ContainerNetworkIP(container Container, network string) (string, error) { inspectCmd, stdout, stderr := cluster.cmd("docker", "inspect", ".NetworkSettings.Networks."+network+".IPAddress", container.ID) err := inspectCmd.Run() if err != nil { return "", fmt.Errorf("Cannot inspect %#v: %s, %s", container, err, stderr) } cleanIP := strings.TrimSpace(stdout.String()) return cleanIP, nil } func (cluster *KafkaCluster) ContainerStop(containerID string) error { stopCmd, _, stderr := cluster.cmd("docker", "stop", containerID) err := stopCmd.Run() if err != nil { return fmt.Errorf("cannot stop %q container: %s, %s", containerID, err, stderr) } return nil } func (cluster *KafkaCluster) ContainerKill(containerID string) error { killCmd, _, stderr := cluster.cmd("docker", "kill", containerID) err := killCmd.Run() if err != nil { return fmt.Errorf("cannot kill %q container: %s, %s", containerID, err, stderr) } return nil } func (cluster *KafkaCluster) ContainerStart(containerID string) error { startCmd, _, stderr := cluster.cmd("docker", "start", containerID) err := startCmd.Run() if err != nil { return fmt.Errorf("cannot start %q container: %s, %s", containerID, err, stderr) } return nil } func (cluster *KafkaCluster) cmd(name string, args ...string) (*exec.Cmd, *bytes.Buffer, *bytes.Buffer) { cmd := exec.Command(name, args...) 
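// buffer stdout and stderr separately so callers can parse the command
// output and include stderr in their error messages; the command runs from
// the kafka-docker directory so docker-compose finds the compose file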
var stdout bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr cmd.Dir = cluster.kafkaDockerDir return cmd, &stdout, &stderr } func (cluster *KafkaCluster) removeStoppedContainers() error { rmCmd, _, stderr := cluster.cmd("docker-compose", "rm", "-f") err := rmCmd.Run() if err != nil { return fmt.Errorf("docker-compose rm error: %s, %s", err, stderr) } return nil } kafka-2.1.1/v2/integration/cluster_test.go000066400000000000000000000052441356004474300205240ustar00rootroot00000000000000package integration import ( "os" "os/exec" "strconv" "testing" ) // Integration test skip test if WITH_INTEGRATION environment variable was not // set to true. func IntegrationTest(t *testing.T) { if !hasDocker() { t.Skip("Integration test. docker and/or docker-compose tools not available") } if ok, _ := strconv.ParseBool(os.Getenv("WITH_INTEGRATION")); !ok { t.Skip("Integration test. Set WITH_INTEGRATION=true to run.") } } func hasDocker() bool { if err := exec.Command("docker", "--version").Run(); err != nil { return false } if err := exec.Command("docker-compose", "--version").Run(); err != nil { return false } return true } func TestKafkaCluster(t *testing.T) { IntegrationTest(t) const clusterSize = 4 cluster := NewKafkaCluster("kafka-docker/", clusterSize) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } addrs, err := cluster.KafkaAddrs() if err != nil { t.Fatalf("cannot get kafka cluster addresses: %s", err) } if len(addrs) != clusterSize { t.Fatalf("expected %d addresses, got %d (%v)", clusterSize, len(addrs), addrs) } if err := cluster.Stop(); err != nil { t.Fatalf("cannot stop kafka cluster: %s", err) } } func TestContainerRestart(t *testing.T) { IntegrationTest(t) cluster := NewKafkaCluster("kafka-docker/", 4) if err := cluster.Start(); err != nil { t.Fatalf("cannot start kafka cluster: %s", err) } containers, err := cluster.Containers() if err != nil { t.Fatalf("cannot get containers info: %s", err) } // 4 kafka + zookeeper if len(containers) != 5 { t.Fatalf("expected 5 containers, got %d", len(containers)) } // first stop all zookeeper containers for _, container := range containers { if container.RunningKafka() { continue } if err := container.Stop(); err != nil { t.Fatalf("cannot stop %q container: %s", container.ID, err) } } // then stop all kafka containers for _, container := range containers { if !container.RunningKafka() { continue } if err := container.Stop(); err != nil { t.Fatalf("cannot stop %q container: %s", container.ID, err) } } // first start all zookeeper containers for _, container := range containers[1:] { if container.RunningKafka() { continue } if err := container.Start(); err != nil { t.Fatalf("cannot start %q container: %s", container.ID, err) } } // then start all kafka containers for _, container := range containers[1:] { if !container.RunningKafka() { continue } if err := container.Start(); err != nil { t.Fatalf("cannot start %q container: %s", container.ID, err) } } if err := cluster.Stop(); err != nil { t.Fatalf("cannot stop kafka cluster: %s", err) } } kafka-2.1.1/v2/integration/kafka-docker/000077500000000000000000000000001356004474300177725ustar00rootroot00000000000000kafka-2.1.1/v2/integration/kafka-docker/Dockerfile000066400000000000000000000015001356004474300217600ustar00rootroot00000000000000FROM anapsix/alpine-java ARG kafka_version=1.0.0 ARG scala_version=2.12 MAINTAINER wurstmeister ENV KAFKA_VERSION=$kafka_version \ SCALA_VERSION=$scala_version \ KAFKA_HOME=/opt/kafka \ 
PATH=${PATH}:${KAFKA_HOME}/bin COPY download-kafka.sh start-kafka.sh broker-list.sh create-topics.sh /tmp/ RUN apk add --update unzip wget curl docker jq coreutils \ && chmod a+x /tmp/*.sh \ && mv /tmp/start-kafka.sh /tmp/broker-list.sh /tmp/create-topics.sh /usr/bin \ && /tmp/download-kafka.sh \ && tar xfz /tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz -C /opt \ && rm /tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz \ && ln -s /opt/kafka_${SCALA_VERSION}-${KAFKA_VERSION} /opt/kafka \ && rm /tmp/* VOLUME ["/kafka"] # Use "exec" form so that it runs as PID 1 (useful for graceful shutdown) CMD ["start-kafka.sh"] kafka-2.1.1/v2/integration/kafka-docker/LICENSE000066400000000000000000000260751356004474300210110ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. kafka-2.1.1/v2/integration/kafka-docker/README.md000066400000000000000000000226021356004474300212530ustar00rootroot00000000000000[![Docker Pulls](https://img.shields.io/docker/pulls/wurstmeister/kafka.svg)](https://hub.docker.com/r/wurstmeister/kafka/) [![Docker Stars](https://img.shields.io/docker/stars/wurstmeister/kafka.svg)](https://hub.docker.com/r/wurstmeister/kafka/) [![](https://badge.imagelayers.io/wurstmeister/kafka:latest.svg)](https://imagelayers.io/?images=wurstmeister/kafka:latest) kafka-docker ============ Dockerfile for [Apache Kafka](http://kafka.apache.org/) The image is available directly from [Docker Hub](https://hub.docker.com/r/wurstmeister/kafka/) ## Pre-Requisites - install docker-compose [https://docs.docker.com/compose/install/](https://docs.docker.com/compose/install/) - modify the ```KAFKA_ADVERTISED_HOST_NAME``` in ```docker-compose.yml``` to match your docker host IP (Note: Do not use localhost or 127.0.0.1 as the host ip if you want to run multiple brokers.) - if you want to customize any Kafka parameters, simply add them as environment variables in ```docker-compose.yml```, e.g. in order to increase the ```message.max.bytes``` parameter set the environment to ```KAFKA_MESSAGE_MAX_BYTES: 2000000```. To turn off automatic topic creation set ```KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'``` - Kafka's log4j usage can be customized by adding environment variables prefixed with ```LOG4J_```. These will be mapped to ```log4j.properties```. For example: ```LOG4J_LOGGER_KAFKA_AUTHORIZER_LOGGER=DEBUG, authorizerAppender``` ## Usage Start a cluster: - ```docker-compose up -d ``` Add more brokers: - ```docker-compose scale kafka=3``` Destroy a cluster: - ```docker-compose stop``` ## Note The default ```docker-compose.yml``` should be seen as a starting point. By default each broker will get a new port number and broker id on restart. Depending on your use case this might not be desirable. If you need to use specific ports and broker ids, modify the docker-compose configuration accordingly, e.g. [docker-compose-single-broker.yml](https://github.com/wurstmeister/kafka-docker/blob/master/docker-compose-single-broker.yml): - ```docker-compose -f docker-compose-single-broker.yml up``` ## Broker IDs You can configure the broker id in different ways 1. explicitly, using ```KAFKA_BROKER_ID``` 2. via a command, using ```BROKER_ID_COMMAND```, e.g. ```BROKER_ID_COMMAND: "hostname | awk -F'-' '{print $2}'"``` If you don't specify a broker id in your docker-compose file, it will automatically be generated (see [https://issues.apache.org/jira/browse/KAFKA-1070](https://issues.apache.org/jira/browse/KAFKA-1070). This allows scaling up and down. 
In this case it is recommended to use the ```--no-recreate``` option of docker-compose to ensure that containers are not re-created and thus keep their names and ids. ## Automatically create topics If you want to have kafka-docker automatically create topics in Kafka during creation, a ```KAFKA_CREATE_TOPICS``` environment variable can be added in ```docker-compose.yml```. Here is an example snippet from ```docker-compose.yml```: environment: KAFKA_CREATE_TOPICS: "Topic1:1:3,Topic2:1:1:compact" ```Topic 1``` will have 1 partition and 3 replicas, ```Topic 2``` will have 1 partition, 1 replica and a `cleanup.policy` set to `compact`. ## Advertised hostname You can configure the advertised hostname in different ways 1. explicitly, using ```KAFKA_ADVERTISED_HOST_NAME``` 2. via a command, using ```HOSTNAME_COMMAND```, e.g. ```HOSTNAME_COMMAND: "route -n | awk '/UG[ \t]/{print $$2}'"``` When using commands, make sure you review the "Variable Substitution" section in [https://docs.docker.com/compose/compose-file/](https://docs.docker.com/compose/compose-file/) If ```KAFKA_ADVERTISED_HOST_NAME``` is specified, it takes precedence over ```HOSTNAME_COMMAND``` For AWS deployment, you can use the Metadata service to get the container host's IP: ``` HOSTNAME_COMMAND=wget -t3 -T2 -qO- http://169.254.169.254/latest/meta-data/local-ipv4 ``` Reference: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html ## Broker Rack You can configure the broker rack affinity in different ways 1. explicitly, using ```KAFKA_BROKER_RACK``` 2. via a command, using ```RACK_COMMAND```, e.g. ```RACK_COMMAND: "curl http://169.254.169.254/latest/meta-data/placement/availability-zone"``` In the above example the AWS metadata service is used to put the instance's availability zone in the ```broker.rack``` property. ## JMX For monitoring purposes you may wish to configure JMX. Additional to the standard JMX parameters, problems could arise from the underlying RMI protocol used to connect * java.rmi.server.hostname - interface to bind listening port * com.sun.management.jmxremote.rmi.port - The port to service RMI requests For example, to connect to a kafka running locally (assumes exposing port 1099) KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099" JMX_PORT: 1099 Jconsole can now connect at ```jconsole 192.168.99.100:1099``` ## Listener Configuration Newer versions of Kafka have deprecated ```advertised.host.name``` and ```advertised.port``` in favor of a more flexible listener configuration that supports multiple listeners using the same or different protocols. This image supports up to three listeners to be configured automatically as shown below. Note: if the below listener configuration is not used, legacy conventions for "advertised.host.name" and "advertised.port" still operate without change. 1. Use ```KAFKA_LISTENER_SECURITY_PROTOCOL_MAP``` to configure an INSIDE, OUTSIDE, and optionally a BROKER protocol. These names are arbitrary but used for consistency and clarity. * ```KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:SSL,BROKER:PLAINTEXT``` configures three listener names, but only the listener named OUTSIDE uses SSL. Note this example does not concern extra steps in configuring SSL on a broker. 2. 
Use ```KAFKA_ADVERTISED_PROTOCOL_NAME``` to set the name from the protocol map to be used for the "advertised.listeners" property. This is "OUTSIDE" in this example. 3. Use ```KAFKA_PROTOCOL_NAME``` to set the name from the protocol map to be used for the "listeners" property. This is "INSIDE" in this example. 4. Use ```KAFKA_INTER_BROKER_LISTENER_NAME``` to set the name from the protocol map to be used for the "inter.broker.listener.name". This defaults to ```KAFKA_PROTOCOL_NAME``` if not supplied. This is "BROKER" in the example. 5. Use ```KAFKA_ADVERTISED_PORT``` and ```KAFKA_ADVERTISED_HOST_NAME``` (or the ```HOSTNAME_COMMAND``` option) to set the name and port to be used in the ```advertised.listeners``` list. 6. Use ```KAFKA_PORT``` and ```KAFKA_HOST_NAME``` (optional) to set the name (optional) and port to be used in the ```listeners``` list. If ```KAFKA_HOST_NAME``` is not defined, Kafka's reasonable default behavior will be used and is sufficient. Note that ```KAFKA_PORT``` defaults to "9092" if not defined. 7. Use ```KAFKA_INTER_BROKER_LISTENER_PORT``` to set the port number to be used in both ```advertised.listeners``` and ```listeners``` for the Inter-broker listener. The host name for this listener is not configurable. Kafka's reasonable default behavior is used. ### Example Given the environment seen here, the following configuration will be written to the Kafka broker properties. ``` HOSTNAME_COMMAND: curl http://169.254.169.254/latest/meta-data/public-hostname KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT KAFKA_ADVERTISED_PROTOCOL_NAME: OUTSIDE KAFKA_PROTOCOL_NAME: INSIDE KAFKA_ADVERTISED_PORT: 9094 ``` The resulting configuration: ``` advertised.listeners = OUTSIDE://ec2-xx-xx-xxx-xx.us-west-2.compute.amazonaws.com:9094,INSIDE://:9092 listeners = OUTSIDE://:9094,INSIDE://:9092 inter.broker.listener.name = INSIDE ``` ### Rules * No listeners may share a port number. * An advertised.listener must be present by name and port number in the list of listeners. * You must not set "security.inter.broker.protocol" at the same time as using this multiple-listener mechanism. ### Best Practices * Reserve port 9092 for INSIDE listeners. * Reserve port 9093 for BROKER listeners. * Reserve port 9094 for OUTSIDE listeners. ## Docker Swarm Mode The listener configuration above is necessary when deploying Kafka in a Docker Swarm using an overlay network. By separating OUTSIDE and INSIDE listeners, a host can communicate with clients outside the overlay network while still benefiting from it from within the swarm. In addition to the multiple-listener configuration, additional best practices for operating Kafka in a Docker Swarm include: * Use "deploy: global" in a compose file to launch one and only one Kafka broker per swarm node. * Use compose file version '3.2' (minimum Docker version 16.04) and the "long" port definition with the port in "host" mode instead of the default "ingress" load-balanced port binding. This ensures that outside requests are always routed to the correct broker. For example: ``` ports: - target: 9094 published: 9094 protocol: tcp mode: host ``` Older compose files using the short-version of port mapping may encounter Kafka client issues if their connection to individual brokers cannot be guaranteed. 
See the included sample compose file ```docker-compose-swarm.yml``` ## Tutorial [http://wurstmeister.github.io/kafka-docker/](http://wurstmeister.github.io/kafka-docker/) kafka-2.1.1/v2/integration/kafka-docker/broker-list.sh000077500000000000000000000003251356004474300225660ustar00rootroot00000000000000#!/bin/bash CONTAINERS=$(docker ps | grep 9092 | awk '{print $1}') BROKERS=$(for CONTAINER in $CONTAINERS; do docker port $CONTAINER 9092 | sed -e "s/0.0.0.0:/$HOST_IP:/g"; done) echo $BROKERS | sed -e 's/ /,/g' kafka-2.1.1/v2/integration/kafka-docker/create-topics.sh000077500000000000000000000020361356004474300230740ustar00rootroot00000000000000#!/bin/bash if [[ -z "$KAFKA_CREATE_TOPICS" ]]; then exit 0 fi if [[ -z "$START_TIMEOUT" ]]; then START_TIMEOUT=600 fi start_timeout_exceeded=false count=0 step=10 while netstat -lnt | awk '$4 ~ /:'$KAFKA_PORT'$/ {exit 1}'; do echo "waiting for kafka to be ready" sleep $step; count=$(expr $count + $step) if [ $count -gt $START_TIMEOUT ]; then start_timeout_exceeded=true break fi done if $start_timeout_exceeded; then echo "Not able to auto-create topic (waited for $START_TIMEOUT sec)" exit 1 fi IFS=','; for topicToCreate in $KAFKA_CREATE_TOPICS; do echo "creating topics: $topicToCreate" IFS=':' read -a topicConfig <<< "$topicToCreate" config= if [ -n "${topicConfig[3]}" ]; then config="--config cleanup.policy=${topicConfig[3]}" fi JMX_PORT='' $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $KAFKA_ZOOKEEPER_CONNECT --replication-factor ${topicConfig[2]} --partitions ${topicConfig[1]} --topic "${topicConfig[0]}" $config --if-not-exists & done wait kafka-2.1.1/v2/integration/kafka-docker/docker-compose-single-broker.yml000066400000000000000000000005571356004474300261770ustar00rootroot00000000000000version: '2' services: zookeeper: image: wurstmeister/zookeeper ports: - "2181:2181" kafka: build: . ports: - "9092:9092" environment: KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100 KAFKA_CREATE_TOPICS: "test:1:1" KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 volumes: - /var/run/docker.sock:/var/run/docker.sock kafka-2.1.1/v2/integration/kafka-docker/docker-compose-swarm.yml000066400000000000000000000012301356004474300245520ustar00rootroot00000000000000version: '3.2' services: zookeeper: image: wurstmeister/zookeeper ports: - "2181:2181" kafka: image: ${REPO}kafka-docker:latest ports: - target: 9094 published: 9094 protocol: tcp mode: host environment: HOSTNAME_COMMAND: "docker info | grep ^Name: | cut -d' ' -f 2" KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT KAFKA_ADVERTISED_PROTOCOL_NAME: OUTSIDE KAFKA_ADVERTISED_PORT: 9094 KAFKA_PROTOCOL_NAME: INSIDE KAFKA_PORT: 9092 volumes: - /var/run/docker.sock:/var/run/docker.sock kafka-2.1.1/v2/integration/kafka-docker/docker-compose.yml000066400000000000000000000006061356004474300234310ustar00rootroot00000000000000version: '2' services: zookeeper: image: wurstmeister/zookeeper ports: - "2181:2181" kafka: build: . 
ports: - "9092" environment: KAFKA_CREATE_TOPICS: "Topic3:1:3,Topic4:1:4" HOSTNAME_COMMAND: "route -n | awk '/UG[ \t]/{print $$2}'" KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 volumes: - /var/run/docker.sock:/var/run/docker.sock kafka-2.1.1/v2/integration/kafka-docker/download-kafka.sh000077500000000000000000000004151356004474300232130ustar00rootroot00000000000000#!/bin/sh mirror=$(curl --stderr /dev/null https://www.apache.org/dyn/closer.cgi\?as_json\=1 | jq -r '.preferred') url="${mirror}kafka/${KAFKA_VERSION}/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" wget -q "${url}" -O "/tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" kafka-2.1.1/v2/integration/kafka-docker/start-kafka-shell.sh000077500000000000000000000002031356004474300236410ustar00rootroot00000000000000#!/bin/bash docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -e HOST_IP=$1 -e ZK=$2 -i -t wurstmeister/kafka /bin/bash kafka-2.1.1/v2/integration/kafka-docker/start-kafka.sh000077500000000000000000000116621356004474300225470ustar00rootroot00000000000000#!/bin/bash if [[ -z "$KAFKA_PORT" ]]; then export KAFKA_PORT=9092 fi create-topics.sh & if [[ -z "$KAFKA_ADVERTISED_PORT" && \ -z "$KAFKA_LISTENERS" && \ -z "$KAFKA_ADVERTISED_LISTENERS" && \ -S /var/run/docker.sock ]]; then export KAFKA_ADVERTISED_PORT=$(docker port `hostname` $KAFKA_PORT | sed -r "s/.*:(.*)/\1/g") fi if [[ -z "$KAFKA_BROKER_ID" ]]; then if [[ -n "$BROKER_ID_COMMAND" ]]; then export KAFKA_BROKER_ID=$(eval $BROKER_ID_COMMAND) else # By default auto allocate broker ID export KAFKA_BROKER_ID=-1 fi fi if [[ -z "$KAFKA_LOG_DIRS" ]]; then export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME" fi if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,) fi if [[ -n "$KAFKA_HEAP_OPTS" ]]; then sed -r -i "s/(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh unset KAFKA_HEAP_OPTS fi if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then export KAFKA_ADVERTISED_HOST_NAME=$(eval $HOSTNAME_COMMAND) fi if [[ -n "$KAFKA_LISTENER_SECURITY_PROTOCOL_MAP" ]]; then if [[ -n "$KAFKA_ADVERTISED_PORT" && -n "$KAFKA_ADVERTISED_PROTOCOL_NAME" ]]; then export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_PROTOCOL_NAME}://${KAFKA_ADVERTISED_HOST_NAME-}:${KAFKA_ADVERTISED_PORT}" export KAFKA_LISTENERS="$KAFKA_ADVERTISED_PROTOCOL_NAME://:$KAFKA_ADVERTISED_PORT" fi if [[ -z "$KAFKA_PROTOCOL_NAME" ]]; then export KAFKA_PROTOCOL_NAME="${KAFKA_ADVERTISED_PROTOCOL_NAME}" fi if [[ -n "$KAFKA_PORT" && -n "$KAFKA_PROTOCOL_NAME" ]]; then export ADD_LISTENER="${KAFKA_PROTOCOL_NAME}://${KAFKA_HOST_NAME-}:${KAFKA_PORT}" fi if [[ -z "$KAFKA_INTER_BROKER_LISTENER_NAME" ]]; then export KAFKA_INTER_BROKER_LISTENER_NAME=$KAFKA_PROTOCOL_NAME fi else #DEFAULT LISTENERS export KAFKA_ADVERTISED_LISTENERS="PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME-}:${KAFKA_ADVERTISED_PORT-$KAFKA_PORT}" export KAFKA_LISTENERS="PLAINTEXT://${KAFKA_HOST_NAME-}:${KAFKA_PORT-9092}" fi if [[ -n "$ADD_LISTENER" && -n "$KAFKA_LISTENERS" ]]; then export KAFKA_LISTENERS="${KAFKA_LISTENERS},${ADD_LISTENER}" fi if [[ -n "$ADD_LISTENER" && -z "$KAFKA_LISTENERS" ]]; then export KAFKA_LISTENERS="${ADD_LISTENER}" fi if [[ -n "$ADD_LISTENER" && -n "$KAFKA_ADVERTISED_LISTENERS" ]]; then export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_LISTENERS},${ADD_LISTENER}" fi if [[ -n "$ADD_LISTENER" && -z "$KAFKA_ADVERTISED_LISTENERS" ]]; then export KAFKA_ADVERTISED_LISTENERS="${ADD_LISTENER}" 
fi if [[ -n "$KAFKA_INTER_BROKER_LISTENER_NAME" && ! "$KAFKA_INTER_BROKER_LISTENER_NAME"X = "$KAFKA_PROTOCOL_NAME"X ]]; then if [[ -n "$KAFKA_INTER_BROKER_PORT" ]]; then export KAFKA_INTER_BROKER_PORT=$(( $KAFKA_PORT + 1 )) fi export INTER_BROKER_LISTENER="${KAFKA_INTER_BROKER_LISTENER_NAME}://:${KAFKA_INTER_BROKER_PORT}" export KAFKA_LISTENERS="${KAFKA_LISTENERS},${INTER_BROKER_LISTENER}" export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_LISTENERS},${INTER_BROKER_LISTENER}" unset KAFKA_INTER_BROKER_PORT unset KAFKA_SECURITY_INTER_BROKER_PROTOCOL unset INTER_BROKER_LISTENER fi if [[ -n "$RACK_COMMAND" && -z "$KAFKA_BROKER_RACK" ]]; then export KAFKA_BROKER_RACK=$(eval $RACK_COMMAND) fi #Issue newline to config file in case there is not one already echo -e "\n" >> $KAFKA_HOME/config/server.properties unset KAFKA_CREATE_TOPICS unset KAFKA_ADVERTISED_PROTOCOL_NAME unset KAFKA_PROTOCOL_NAME if [[ -n "$KAFKA_ADVERTISED_LISTENERS" ]]; then unset KAFKA_ADVERTISED_PORT unset KAFKA_ADVERTISED_HOST_NAME fi if [[ -n "$KAFKA_LISTENERS" ]]; then unset KAFKA_PORT unset KAFKA_HOST_NAME fi for VAR in `env` do if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .` env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"` if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then sed -r -i "s@(^|^#)($kafka_name)=(.*)@\2=${!env_var}@g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '@' char else echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties fi fi if [[ $VAR =~ ^LOG4J_ ]]; then log4j_name=`echo "$VAR" | sed -r "s/(LOG4J_.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .` log4j_env=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"` if egrep -q "(^|^#)$log4j_name=" $KAFKA_HOME/config/log4j.properties; then sed -r -i "s@(^|^#)($log4j_name)=(.*)@\2=${!log4j_env}@g" $KAFKA_HOME/config/log4j.properties #note that no config values may contain an '@' char else echo "$log4j_name=${!log4j_env}" >> $KAFKA_HOME/config/log4j.properties fi fi done if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then eval $CUSTOM_INIT_SCRIPT fi exec $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties kafka-2.1.1/v2/kafkatest/000077500000000000000000000000001356004474300151025ustar00rootroot00000000000000kafka-2.1.1/v2/kafkatest/broker.go000066400000000000000000000174641356004474300167310ustar00rootroot00000000000000package kafkatest import ( "errors" "fmt" "sync" "time" "github.com/optiopay/kafka/v2" "github.com/optiopay/kafka/v2/proto" ) var ( ErrTimeout = errors.New("timeout") ErrNotImplemented = errors.New("not implemented") // test implementation should implement the interface _ kafka.Client = &Broker{} _ kafka.Producer = &Producer{} _ kafka.Consumer = &Consumer{} ) // Broker is mock version of kafka's broker. It's implementing Broker interface // and provides easy way of mocking server actions. type Broker struct { produced chan *ProducedMessages mu sync.Mutex consumers map[string]map[int32]*Consumer // OffsetEarliestHandler is callback function called whenever // OffsetEarliest method of the broker is called. Overwrite to change // default behaviour -- always returning ErrUnknownTopicOrPartition OffsetEarliestHandler func(string, int32) (int64, error) // OffsetLatestHandler is callback function called whenever OffsetLatest // method of the broker is called. 
Overwrite to change default behaviour -- // always returning ErrUnknownTopicOrPartition OffsetLatestHandler func(string, int32) (int64, error) } func NewBroker() *Broker { return &Broker{ consumers: make(map[string]map[int32]*Consumer), produced: make(chan *ProducedMessages), } } // Close is no operation method, required by Broker interface. func (b *Broker) Close() { } // OffsetEarliest return result of OffsetEarliestHandler callback set on the // broker. If not set, always return ErrUnknownTopicOrPartition func (b *Broker) OffsetEarliest(topic string, partition int32) (int64, error) { if b.OffsetEarliestHandler != nil { return b.OffsetEarliestHandler(topic, partition) } return 0, proto.ErrUnknownTopicOrPartition } // OffsetLatest return result of OffsetLatestHandler callback set on the // broker. If not set, always return ErrUnknownTopicOrPartition func (b *Broker) OffsetLatest(topic string, partition int32) (int64, error) { if b.OffsetLatestHandler != nil { return b.OffsetLatestHandler(topic, partition) } return 0, proto.ErrUnknownTopicOrPartition } // Consumer returns consumer mock and never error. // // At most one consumer for every topic-partition pair can be created -- // calling this for the same topic-partition will always return the same // consumer instance. func (b *Broker) Consumer(conf kafka.ConsumerConf) (kafka.Consumer, error) { b.mu.Lock() defer b.mu.Unlock() if t, ok := b.consumers[conf.Topic]; ok { if c, ok := t[conf.Partition]; ok { return c, nil } } else { b.consumers[conf.Topic] = make(map[int32]*Consumer) } c := &Consumer{ conf: conf, Broker: b, Messages: make(chan *proto.Message), Errors: make(chan error), } b.consumers[conf.Topic][conf.Partition] = c return c, nil } // Producer returns producer mock instance. func (b *Broker) Producer(kafka.ProducerConf) kafka.Producer { return &Producer{ Broker: b, responseOffset: 1, } } // OffsetCoordinator returns offset coordinator mock instance. It's always // successful, so you can always ignore returned error. func (b *Broker) OffsetCoordinator(conf kafka.OffsetCoordinatorConf) (kafka.OffsetCoordinator, error) { c := &OffsetCoordinator{ Broker: b, conf: conf, } return c, nil } // ReadProducers return ProduceMessages representing produce call of one of // created by broker producers or ErrTimeout. func (b *Broker) ReadProducers(timeout time.Duration) (*ProducedMessages, error) { select { case p := <-b.produced: return p, nil case <-time.After(timeout): return nil, ErrTimeout } } // Consumer mocks kafka's consumer. Use Messages and Errors channels to mock // Consume method results. type Consumer struct { conf kafka.ConsumerConf Broker *Broker // Messages is channel consumed by fetch method call. Pushing message into // this channel will result in Consume method call returning message data. Messages chan *proto.Message // Errors is channel consumed by fetch method call. Pushing error into this // channel will result in Consume method call returning error. Errors chan error } // Consume returns message or error pushed through consumers Messages and Errors // channel. Function call will block until data on at least one of those // channels is available. func (c *Consumer) Consume() (*proto.Message, error) { select { case msg := <-c.Messages: msg.Topic = c.conf.Topic msg.Partition = c.conf.Partition return msg, nil case err := <-c.Errors: return nil, err } } // Producer mocks kafka's producer. type Producer struct { Broker *Broker // responseOffset is offset counter returned and incremented by every // Produce method call. 
By default set to 1. responseOffset int64 offsetMutex sync.Mutex // ResponseError if set, force Produce method call to instantly return // error, without publishing messages. By default nil. ResponseError error } // ProducedMessages represents all arguments used for single Produce method // call. type ProducedMessages struct { Topic string Partition int32 Messages []*proto.Message } // ResponseOffset returns the offset counter. The counter is // incremented every time the Produce method is called. By default the // counter is set to 1. func (p *Producer) ResponseOffset() int64 { p.offsetMutex.Lock() defer p.offsetMutex.Unlock() return p.responseOffset } // Produce is settings messages Crc and Offset attributes and pushing all // passed arguments to broker. Produce call is blocking until pushed message // will be read with broker's ReadProduces. func (p *Producer) Produce(topic string, partition int32, messages ...*proto.Message) (int64, error) { if p.ResponseError != nil { return 0, p.ResponseError } p.offsetMutex.Lock() defer p.offsetMutex.Unlock() off := p.responseOffset for i, msg := range messages { msg.Offset = off + int64(i) msg.Crc = proto.ComputeCrc(msg, proto.CompressionNone) } p.Broker.produced <- &ProducedMessages{ Topic: topic, Partition: partition, Messages: messages, } p.responseOffset += int64(len(messages)) return off, nil } type OffsetCoordinator struct { conf kafka.OffsetCoordinatorConf Broker *Broker // Offsets is used to store all offset commits when using mocked // coordinator's default behaviour. Offsets map[string]int64 // CommitHandler is callback function called whenever Commit method of the // OffsetCoordinator is called. If CommitHandler is nil, Commit method will // return data using Offset attribute as store. CommitHandler func(consumerGroup string, topic string, partition int32, offset int64) error // OffsetHandler is callback function called whenever Offset method of the // OffsetCoordinator is called. If OffsetHandler is nil, Commit method will // use Offset attribute to retrieve the offset. OffsetHandler func(consumerGroup string, topic string, partition int32) (offset int64, metadata string, err error) } // Commit return result of CommitHandler callback set on coordinator. If // handler is nil, this method will use Offsets attribute to store data for // further use. func (c *OffsetCoordinator) Commit(topic string, partition int32, offset int64) error { if c.CommitHandler != nil { return c.CommitHandler(c.conf.ConsumerGroup, topic, partition, offset) } c.Offsets[fmt.Sprintf("%s:%d", topic, partition)] = offset return nil } // Offset return result of OffsetHandler callback set on coordinator. If // handler is nil, this method will use Offsets attribute to retrieve committed // offset. If no offset for given topic and partition pair was saved, // proto.ErrUnknownTopicOrPartition is returned. 
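//
// A small usage sketch (NewOffsetCoordinatorConf is assumed to be the
// constructor from the parent kafka package):
//
//	broker := NewBroker()
//	coord, _ := broker.OffsetCoordinator(kafka.NewOffsetCoordinatorConf("my-group"))
//	mock := coord.(*OffsetCoordinator)
//	mock.Offsets = map[string]int64{"my-topic:0": 42}
//	off, _, err := coord.Offset("my-topic", 0) // off == 42, err == nil
//	_, _, err = coord.Offset("my-topic", 1)    // err == proto.ErrUnknownTopicOrPartition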
func (c *OffsetCoordinator) Offset(topic string, partition int32) (offset int64, metadata string, err error) { if c.OffsetHandler != nil { return c.OffsetHandler(c.conf.ConsumerGroup, topic, partition) } off, ok := c.Offsets[fmt.Sprintf("%s:%d", topic, partition)] if !ok { return 0, "", proto.ErrUnknownTopicOrPartition } return off, "", nil } kafka-2.1.1/v2/kafkatest/broker_test.go000066400000000000000000000021361356004474300177560ustar00rootroot00000000000000package kafkatest import ( "fmt" "sync" "testing" "time" "github.com/optiopay/kafka/v2" "github.com/optiopay/kafka/v2/proto" ) func TestBrokerProducer(t *testing.T) { broker := NewBroker() var wg sync.WaitGroup wg.Add(1) go readTestMessages(broker, t, &wg) producer := broker.Producer(kafka.NewProducerConf()) for i := 0; i < 4; i++ { wg.Add(1) go produceTestMessage(producer, t, &wg) } wg.Wait() } func readTestMessages(b *Broker, t *testing.T, wg *sync.WaitGroup) { defer wg.Done() var i int64 for i = 1; i <= 20; i++ { msg := <-b.produced if got := len(msg.Messages); got != 1 { t.Fatalf("expected 1 message, got: %d", got) } m := msg.Messages[0] if m.Offset != i { t.Errorf("expected offset to be larger: prev: %d, got: %d", i, m.Offset) } } } func produceTestMessage(p kafka.Producer, t *testing.T, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 5; i++ { now := time.Now().UnixNano() msg := &proto.Message{Value: []byte(fmt.Sprintf("%d", now))} _, err := p.Produce("my-topic", 0, msg) if err != nil { t.Errorf("cannot produce: %s", err) } } } kafka-2.1.1/v2/kafkatest/doc.go000066400000000000000000000003171356004474300161770ustar00rootroot00000000000000/* Package kafkatest provides mock objects for high level kafka interface. Use NewBroker function to create mock broker object and standard methods to create producers and consumers. */ package kafkatest kafka-2.1.1/v2/kafkatest/example_test.go000066400000000000000000000054221356004474300201260ustar00rootroot00000000000000package kafkatest import ( "errors" "fmt" "reflect" "time" "github.com/optiopay/kafka/v2" "github.com/optiopay/kafka/v2/proto" ) func ExampleBroker_Producer() { broker := NewBroker() msg := &proto.Message{Value: []byte("first")} producer := broker.Producer(kafka.NewProducerConf()) // mock server actions, handling any produce call go func() { resp, err := broker.ReadProducers(time.Millisecond * 20) if err != nil { panic(fmt.Sprintf("failed reading producers: %s", err)) } if len(resp.Messages) != 1 { panic("expected single message") } if !reflect.DeepEqual(resp.Messages[0], msg) { panic("expected different message") } }() // provide data for above goroutine _, err := producer.Produce("my-topic", 0, msg) if err != nil { panic(fmt.Sprintf("cannot produce message: %s", err)) } mockProducer := producer.(*Producer) // test error handling by forcing producer to return error, // // it is possible to manipulate produce result by changing producer's // ResponseOffset and ResponseError attributes mockProducer.ResponseError = errors.New("my spoon is too big!") _, err = producer.Produce("my-topic", 0, msg) fmt.Printf("Error: %s\n", err) // output: // // Error: my spoon is too big! } func ExampleBroker_Consumer() { broker := NewBroker() msg := &proto.Message{Value: []byte("first")} // mock server actions, pushing data through consumer go func() { consumer, _ := broker.Consumer(kafka.NewConsumerConf("my-topic", 0)) c := consumer.(*Consumer) // it is possible to send messages through consumer... 
		c.Messages <- msg

		// every consumer fetch call is blocking until there is either a message
		// or an error ready to return, this way we can test slow consumers
		time.Sleep(time.Millisecond * 20)

		// ...as well as push errors to mock failure
		c.Errors <- errors.New("expected error is expected")
	}()

	// test broker never fails creating consumer
	consumer, _ := broker.Consumer(kafka.NewConsumerConf("my-topic", 0))

	m, err := consumer.Consume()
	if err == nil {
		fmt.Printf("Value: %q\n", m.Value)
	}
	if _, err = consumer.Consume(); err != nil {
		fmt.Printf("Error: %s\n", err)
	}

	// output:
	//
	// Value: "first"
	// Error: expected error is expected
}

func ExampleServer() {
	// simulate server latency for all fetch requests
	delayFetch := func(nodeID int32, reqKind int16, content []byte) Response {
		if reqKind != proto.FetchReqKind {
			return nil
		}
		time.Sleep(time.Millisecond * 500)
		return nil
	}

	server := NewServer(delayFetch)
	server.MustSpawn()
	defer func() {
		_ = server.Close()
	}()
	fmt.Printf("running server: %s", server.Addr())

	server.AddMessages("my-topic", 0,
		&proto.Message{Value: []byte("first")},
		&proto.Message{Value: []byte("second")})

	// connect to server using broker and fetch/write messages
}
kafka-2.1.1/v2/kafkatest/server.go000066400000000000000000000410611356004474300167410ustar00rootroot00000000000000package kafkatest

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/optiopay/kafka/v2/proto"
)

type topicOffset struct {
	offset   int64
	metadata string
}

// Server is a container for fake kafka server data.
type Server struct {
	mu          sync.RWMutex
	brokers     []proto.MetadataRespBroker
	topics      map[string]map[int32][]*proto.Message
	offsets     map[string]map[int32]map[string]*topicOffset
	ln          net.Listener
	middlewares []Middleware
	events      chan struct{}
}

// Middleware is a function that is called for every incoming kafka message,
// before running the default processing handler. A middleware function can
// return nil or a kafka response message.
type Middleware func(nodeID int32, requestKind int16, content []byte) Response

// Response is any kafka response as defined in the kafka/proto package.
type Response interface {
	Bytes() ([]byte, error)
}

// NewServer returns a new mock server instance. Any number of middlewares can
// be passed to customize request handling. For every incoming request, all
// middlewares are called one after another in the order they were passed. If
// any middleware returns a non-nil response message, the response is instantly
// written to the client and no further processing of the request is done -- no
// other middleware is called and the default handler is not executed.
func NewServer(middlewares ...Middleware) *Server {
	s := &Server{
		brokers:     make([]proto.MetadataRespBroker, 0),
		topics:      make(map[string]map[int32][]*proto.Message),
		offsets:     make(map[string]map[int32]map[string]*topicOffset),
		middlewares: middlewares,
		events:      make(chan struct{}, 1000),
	}
	return s
}

// Addr returns the server instance address, or an empty string if the server
// is not running.
func (s *Server) Addr() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.ln != nil {
		return s.ln.Addr().String()
	}
	return ""
}

// Reset clears out local messages and topics.
func (s *Server) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.topics = make(map[string]map[int32][]*proto.Message)
}

// Close shuts down the server if it is running. It is safe to call it more
// than once.
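//
// A small lifecycle sketch (behaviour as implemented in this file; the
// address shown is only an example):
//
//	srv := NewServer()
//	srv.MustSpawn()
//	fmt.Println(srv.Addr()) // e.g. "127.0.0.1:53121"
//	_ = srv.Close()         // stops listening
//	_ = srv.Close()         // calling Close again is a harmless no-op
//	fmt.Println(srv.Addr()) // empty string once the server is closed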
func (s *Server) Close() (err error) { s.mu.Lock() defer s.mu.Unlock() if s.ln != nil { err = s.ln.Close() s.ln = nil } return err } // ServeHTTP provides JSON-serialized server state information. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.mu.Lock() defer s.mu.Unlock() topics := make(map[string]map[string][]*proto.Message) for name, parts := range s.topics { topics[name] = make(map[string][]*proto.Message) for part, messages := range parts { topics[name][strconv.Itoa(int(part))] = messages } } w.Header().Set("content-type", "application/json") err := json.NewEncoder(w).Encode(map[string]interface{}{ "topics": topics, "brokers": s.brokers, }) if err != nil { log.Printf("cannot JSON encode state: %s", err) } } // AddMessages appends messages to the given topic/partition. If the topic or partition // does not exist, it is created. // To only create a topic/partition, call this method without giving any // message. func (s *Server) AddMessages(topic string, partition int32, messages ...*proto.Message) { s.mu.Lock() defer s.mu.Unlock() parts, ok := s.topics[topic] if !ok { parts = make(map[int32][]*proto.Message) s.topics[topic] = parts } for i := int32(0); i <= partition; i++ { if _, ok := parts[i]; !ok { parts[i] = make([]*proto.Message, 0) } } if len(messages) > 0 { start := len(parts[partition]) for i, msg := range messages { msg.Offset = int64(start + i) msg.Partition = partition msg.Topic = topic } parts[partition] = append(parts[partition], messages...) } } // Run starts the kafka mock server listening on the given address. func (s *Server) Run(addr string) error { const nodeID = 100 s.mu.RLock() if s.ln != nil { s.mu.RUnlock() log.Printf("server already running: %s", s.ln.Addr()) return fmt.Errorf("server already running: %s", s.ln.Addr()) } ln, err := net.Listen("tcp4", addr) if err != nil { s.mu.RUnlock() log.Printf("cannot listen on address %q: %s", addr, err) return fmt.Errorf("cannot listen: %s", err) } defer func() { _ = ln.Close() }() s.ln = ln if host, port, err := net.SplitHostPort(ln.Addr().String()); err != nil { s.mu.RUnlock() log.Printf("cannot extract host/port from %q: %s", ln.Addr(), err) return fmt.Errorf("cannot extract host/port from %q: %s", ln.Addr(), err) } else { prt, err := strconv.Atoi(port) if err != nil { s.mu.RUnlock() log.Printf("invalid port %q: %s", port, err) return fmt.Errorf("invalid port %q: %s", port, err) } s.brokers = append(s.brokers, proto.MetadataRespBroker{ NodeID: nodeID, Host: host, Port: int32(prt), }) } s.mu.RUnlock() for { conn, err := ln.Accept() if err == nil { go s.handleClient(nodeID, conn) } } } // MustSpawn runs the server in the background on a random port. It panics if the server // cannot be spawned. // Use the Close method to stop the spawned server.
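//
// A minimal usage sketch (hedged, not part of the original documentation; the
// topic name and message value below are arbitrary examples):
//
//	srv := NewServer()
//	srv.MustSpawn()
//	defer func() { _ = srv.Close() }()
//
//	srv.AddMessages("my-topic", 0, &proto.Message{Value: []byte("hello")})
//	addr := srv.Addr() // point a kafka client at this address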
func (s *Server) MustSpawn() { const nodeID = 100 s.mu.Lock() defer s.mu.Unlock() if s.ln != nil { return } ln, err := net.Listen("tcp4", "localhost:0") if err != nil { panic(fmt.Sprintf("cannot listen: %s", err)) } s.ln = ln if host, port, err := net.SplitHostPort(ln.Addr().String()); err != nil { panic(fmt.Sprintf("cannot extract host/port from %q: %s", ln.Addr(), err)) } else { prt, err := strconv.Atoi(port) if err != nil { panic(fmt.Sprintf("invalid port %q: %s", port, err)) } s.brokers = append(s.brokers, proto.MetadataRespBroker{ NodeID: nodeID, Host: host, Port: int32(prt), }) } go func() { for { conn, err := ln.Accept() if err == nil { go s.handleClient(nodeID, conn) } } }() } func (s *Server) handleClient(nodeID int32, conn net.Conn) { defer func() { _ = conn.Close() }() for { kind, b, err := proto.ReadReq(conn) if err != nil { if err != io.EOF { log.Printf("client read error: %s", err) } return } var resp response for _, middleware := range s.middlewares { resp = middleware(nodeID, kind, b) if resp != nil { break } } if resp == nil { switch kind { case proto.ProduceReqKind: req, err := proto.ReadProduceReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse produce request: %s\n%s", err, b) return } resp = s.handleProduceRequest(nodeID, conn, req) case proto.FetchReqKind: req, err := proto.ReadFetchReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse fetch request: %s\n%s", err, b) return } resp = s.handleFetchRequest(nodeID, conn, req) case proto.OffsetReqKind: req, err := proto.ReadOffsetReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse offset request: %s\n%s", err, b) return } resp = s.handleOffsetRequest(nodeID, conn, req) case proto.MetadataReqKind: req, err := proto.ReadMetadataReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse metadata request: %s\n%s", err, b) return } resp = s.handleMetadataRequest(nodeID, conn, req) case proto.OffsetCommitReqKind: req, err := proto.ReadOffsetCommitReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse offset commit request: %s\n%s", err, b) return } resp = s.handleOffsetCommitRequest(nodeID, conn, req) case proto.OffsetFetchReqKind: req, err := proto.ReadOffsetFetchReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse offset fetch request: %s\n%s", err, b) return } resp = s.handleOffsetFetchRequest(nodeID, conn, req) case proto.ConsumerMetadataReqKind: req, err := proto.ReadConsumerMetadataReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse consumer metadata request: %s\n%s", err, b) return } resp = s.handleConsumerMetadataRequest(nodeID, conn, req) case proto.APIVersionsReqKind: req, err := proto.ReadAPIVersionsReq(bytes.NewBuffer(b)) if err != nil { log.Printf("cannot parse API version request: %s\n%s", err, b) return } resp = s.handleAPIVersionsRequest(nodeID, conn, req) default: log.Printf("unknown request: %d\n%s", kind, b) return } } if resp == nil { log.Printf("no response for %d", kind) return } b, err = resp.Bytes() if err != nil { log.Printf("cannot serialize %T response: %s", resp, err) } if _, err := conn.Write(b); err != nil { log.Printf("cannot write %T response: %s", resp, err) return } } } type response interface { Bytes() ([]byte, error) } func (s *Server) handleProduceRequest(nodeID int32, conn net.Conn, req *proto.ProduceReq) response { s.mu.Lock() defer s.mu.Unlock() resp := &proto.ProduceResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.ProduceRespTopic, len(req.Topics)), } for ti, topic := 
range req.Topics { t, ok := s.topics[topic.Name] if !ok { t = make(map[int32][]*proto.Message) s.topics[topic.Name] = t } respParts := make([]proto.ProduceRespPartition, len(topic.Partitions)) resp.Topics[ti].Name = topic.Name resp.Topics[ti].Partitions = respParts for pi, part := range topic.Partitions { p, ok := t[part.ID] if !ok { p = make([]*proto.Message, 0) t[part.ID] = p } for _, msg := range part.Messages { msg.Offset = int64(len(t[part.ID])) msg.Topic = topic.Name t[part.ID] = append(t[part.ID], msg) } respParts[pi].ID = part.ID respParts[pi].Offset = int64(len(t[part.ID])) - 1 } } s.events <- struct{}{} return resp } func (s *Server) fetchRequest(req *proto.FetchReq) (response, int) { s.mu.RLock() defer s.mu.RUnlock() var messagesNum int resp := &proto.FetchResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.FetchRespTopic, len(req.Topics)), } for ti, topic := range req.Topics { respParts := make([]proto.FetchRespPartition, len(topic.Partitions)) resp.Topics[ti].Name = topic.Name resp.Topics[ti].Partitions = respParts for pi, part := range topic.Partitions { respParts[pi].ID = part.ID partitions, ok := s.topics[topic.Name] if !ok { respParts[pi].Err = proto.ErrUnknownTopicOrPartition continue } messages, ok := partitions[part.ID] if !ok { respParts[pi].Err = proto.ErrUnknownTopicOrPartition continue } if part.FetchOffset > int64(len(messages)) { respParts[pi].Err = proto.ErrOffsetOutOfRange continue } respParts[pi].TipOffset = int64(len(messages)) respParts[pi].Messages = messages[part.FetchOffset:] messagesNum += len(messages[part.FetchOffset:]) } } return resp, messagesNum } func (s *Server) handleFetchRequest(nodeID int32, conn net.Conn, req *proto.FetchReq) response { resp, n := s.fetchRequest(req) if n == 0 { select { case _ = <-s.events: case _ = <-time.After(time.Second): } resp, _ = s.fetchRequest(req) } return resp } func (s *Server) handleOffsetRequest(nodeID int32, conn net.Conn, req *proto.OffsetReq) response { s.mu.RLock() defer s.mu.RUnlock() resp := &proto.OffsetResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.OffsetRespTopic, len(req.Topics)), } for ti, topic := range req.Topics { respPart := make([]proto.OffsetRespPartition, len(topic.Partitions)) resp.Topics[ti].Name = topic.Name resp.Topics[ti].Partitions = respPart for pi, part := range topic.Partitions { respPart[pi].ID = part.ID switch part.TimeMs { case -1: // oldest msgs := len(s.topics[topic.Name][part.ID]) respPart[pi].Offsets = []int64{int64(msgs), 0} case -2: // earliest respPart[pi].Offsets = []int64{0, 0} default: log.Printf("offset time for %s:%d not supported: %d", topic.Name, part.ID, part.TimeMs) return nil } } } return resp } func (s *Server) handleConsumerMetadataRequest(nodeID int32, conn net.Conn, req *proto.ConsumerMetadataReq) response { s.mu.RLock() defer s.mu.RUnlock() addrps := strings.Split(s.Addr(), ":") port, _ := strconv.Atoi(addrps[1]) return &proto.ConsumerMetadataResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), CoordinatorID: 0, CoordinatorHost: addrps[0], CoordinatorPort: int32(port), } } func (s *Server) handleAPIVersionsRequest(nodeID int32, conn net.Conn, req *proto.APIVersionsReq) response { s.mu.RLock() defer s.mu.RUnlock() return &proto.APIVersionsResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), APIVersions: []proto.SupportedVersion{ {APIKey: proto.ProduceReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.FetchReqKind, 
MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.OffsetReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.MetadataReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.OffsetCommitReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.OffsetFetchReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.ConsumerMetadataReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, {APIKey: proto.APIVersionsReqKind, MinVersion: proto.KafkaV0, MaxVersion: proto.KafkaV0}, }, } } func (s *Server) getTopicOffset(group, topic string, partID int32) *topicOffset { pmap, ok := s.offsets[topic] if !ok { pmap = make(map[int32]map[string]*topicOffset) s.offsets[topic] = pmap } groups, ok := pmap[partID] if !ok { groups = make(map[string]*topicOffset) pmap[partID] = groups } toffset, ok := groups[group] if !ok { toffset = &topicOffset{} groups[group] = toffset } return toffset } func (s *Server) handleOffsetFetchRequest(nodeID int32, conn net.Conn, req *proto.OffsetFetchReq) response { s.mu.RLock() defer s.mu.RUnlock() resp := &proto.OffsetFetchResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.OffsetFetchRespTopic, len(req.Topics)), } for ti, topic := range req.Topics { respPart := make([]proto.OffsetFetchRespPartition, len(topic.Partitions)) resp.Topics[ti].Name = topic.Name resp.Topics[ti].Partitions = respPart for pi, part := range topic.Partitions { toffset := s.getTopicOffset(req.ConsumerGroup, topic.Name, part) respPart[pi].ID = part respPart[pi].Metadata = toffset.metadata respPart[pi].Offset = toffset.offset } } return resp } func (s *Server) handleOffsetCommitRequest(nodeID int32, conn net.Conn, req *proto.OffsetCommitReq) response { s.mu.Lock() defer s.mu.Unlock() resp := &proto.OffsetCommitResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.OffsetCommitRespTopic, len(req.Topics)), } for ti, topic := range req.Topics { respPart := make([]proto.OffsetCommitRespPartition, len(topic.Partitions)) resp.Topics[ti].Name = topic.Name resp.Topics[ti].Partitions = respPart for pi, part := range topic.Partitions { toffset := s.getTopicOffset(req.ConsumerGroup, topic.Name, part.ID) toffset.metadata = part.Metadata toffset.offset = part.Offset respPart[pi].ID = part.ID } } return resp } func (s *Server) handleMetadataRequest(nodeID int32, conn net.Conn, req *proto.MetadataReq) response { s.mu.RLock() defer s.mu.RUnlock() resp := &proto.MetadataResp{ Version: proto.KafkaV0, CorrelationID: req.GetCorrelationID(), Topics: make([]proto.MetadataRespTopic, 0, len(s.topics)), Brokers: s.brokers, } if req.Topics != nil && len(req.Topics) > 0 { // if particular topic was requested, create empty log if does not yet exists for _, name := range req.Topics { partitions, ok := s.topics[name] if !ok { partitions = make(map[int32][]*proto.Message) partitions[0] = make([]*proto.Message, 0) s.topics[name] = partitions } parts := make([]proto.MetadataRespPartition, len(partitions)) for pid := range partitions { p := &parts[pid] p.ID = pid p.Leader = nodeID p.Replicas = []int32{nodeID} p.Isrs = []int32{nodeID} } resp.Topics = append(resp.Topics, proto.MetadataRespTopic{ Name: name, Partitions: parts, }) } } else { for name, partitions := range s.topics { parts := make([]proto.MetadataRespPartition, len(partitions)) for pid := range partitions { p := &parts[pid] p.ID = pid p.Leader = nodeID p.Replicas = []int32{nodeID} p.Isrs = 
[]int32{nodeID} } resp.Topics = append(resp.Topics, proto.MetadataRespTopic{ Name: name, Partitions: parts, }) } } return resp } kafka-2.1.1/v2/log.go000066400000000000000000000013171356004474300142370ustar00rootroot00000000000000package kafka // Logger is general logging interface that can be provided by popular logging // frameworks. // // * https://github.com/go-kit/kit/tree/master/log // * https://github.com/husio/log type Logger interface { Debug(msg string, args ...interface{}) Info(msg string, args ...interface{}) Warn(msg string, args ...interface{}) Error(msg string, args ...interface{}) } // nullLogger implements Logger interface, but discards all messages type nullLogger struct { } func (nullLogger) Debug(msg string, args ...interface{}) {} func (nullLogger) Info(msg string, args ...interface{}) {} func (nullLogger) Warn(msg string, args ...interface{}) {} func (nullLogger) Error(msg string, args ...interface{}) {} kafka-2.1.1/v2/multiplexer.go000066400000000000000000000057411356004474300160350ustar00rootroot00000000000000package kafka import ( "errors" "sync" "github.com/optiopay/kafka/v2/proto" ) // ErrMxClosed is returned as a result of closed multiplexer consumption. var ErrMxClosed = errors.New("closed") // Mx is multiplexer combining into single stream number of consumers. // // It is responsibility of the user of the multiplexer and the consumer // implementation to handle errors. // ErrNoData returned by consumer is not passed through by the multiplexer, // instead consumer that returned ErrNoData is removed from merged set. When // all consumers are removed (set is empty), Mx is automatically closed and any // further Consume call will result in ErrMxClosed error. // // It is important to remember that because fetch from every consumer is done // by separate worker, most of the time there is one message consumed by each // worker that is held in memory while waiting for opportunity to return it // once Consume on multiplexer is called. Closing multiplexer may result in // ignoring some of already read, waiting for delivery messages kept internally // by every worker. type Mx struct { errc chan error msgc chan *proto.Message stop chan struct{} mu sync.Mutex closed bool workers int } // Merge is merging consume result of any number of consumers into single stream // and expose them through returned multiplexer. func Merge(consumers ...Consumer) *Mx { p := &Mx{ errc: make(chan error), msgc: make(chan *proto.Message), stop: make(chan struct{}), workers: len(consumers), } for _, consumer := range consumers { go func(c Consumer) { defer func() { p.mu.Lock() p.workers-- if p.workers == 0 && !p.closed { close(p.stop) p.closed = true } p.mu.Unlock() }() for { msg, err := c.Consume() if err != nil { if err == ErrNoData { return } select { case p.errc <- err: case <-p.stop: return } } else { select { case p.msgc <- msg: case <-p.stop: return } } } }(consumer) } return p } // Workers return number of active consumer workers that are pushing messages // to multiplexer conumer queue. func (p *Mx) Workers() int { p.mu.Lock() defer p.mu.Unlock() return p.workers } // Close is closing multiplexer and stopping all underlying workers. // // Closing multiplexer will stop all workers as soon as possible, but any // consume-in-progress action performed by worker has to be finished first. Any // consumption result received after closing multiplexer is ignored. // // Close is returning without waiting for all the workers to finish. // // Closing closed multiplexer has no effect. 
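//
// A short usage sketch (hedged; c1 and c2 stand for any Consumer
// implementations created elsewhere, e.g. by Broker.Consumer):
//
//	mx := Merge(c1, c2)
//	defer mx.Close()
//	for {
//		msg, err := mx.Consume()
//		if err != nil {
//			// ErrMxClosed once every worker is done, otherwise a consumer error
//			break
//		}
//		_ = msg // handle the message
//	}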
func (p *Mx) Close() { p.mu.Lock() defer p.mu.Unlock() if !p.closed { p.closed = true close(p.stop) } } // Consume returns Consume result from any of the merged consumer. func (p *Mx) Consume() (*proto.Message, error) { select { case <-p.stop: return nil, ErrMxClosed case msg := <-p.msgc: return msg, nil case err := <-p.errc: return nil, err } } kafka-2.1.1/v2/multiplexer_test.go000066400000000000000000000061441356004474300170720ustar00rootroot00000000000000package kafka import ( "errors" "testing" "time" "github.com/optiopay/kafka/v2/proto" ) type fetcher struct { messages []*proto.Message errors []error } func (f *fetcher) Consume() (*proto.Message, error) { // sleep a bit to let the other's work time.Sleep(time.Microsecond * 500) if len(f.messages) > 0 { msg := f.messages[0] f.messages = f.messages[1:] return msg, nil } if len(f.errors) > 0 { err := f.errors[0] f.errors = f.errors[1:] return nil, err } panic("not implemented") } func TestMultiplexerConsume(t *testing.T) { fetchers := []Consumer{ &fetcher{ messages: []*proto.Message{ {Value: []byte("first")}, {Value: []byte("second")}, }, errors: []error{ errors.New("e first"), errors.New("e second"), errors.New("e third"), }, }, &fetcher{ messages: []*proto.Message{ {Value: []byte("1")}, {Value: []byte("2")}, }, errors: []error{ errors.New("e 1"), errors.New("e 2"), errors.New("e 3"), }, }, } results := make(map[string]bool) mx := Merge(fetchers...) defer mx.Close() for i := 0; i < 8; i++ { msg, err := mx.Consume() if err != nil { results[err.Error()] = true } else { results[string(msg.Value)] = true } } expected := []string{ "first", "second", "e first", "e second", "1", "2", "e 1", "e 2", } // expected 4 messages and 2 errors if len(results) != len(expected) { t.Errorf("expected %d results, got %d", len(expected), len(results)) } for _, name := range expected { if results[name] != true { t.Errorf("%q not found: %#v", name, results) } } } func TestClosingMultiplexer(t *testing.T) { fetchers := []Consumer{ &fetcher{errors: []error{errors.New("a1")}}, &fetcher{errors: []error{errors.New("b1")}}, &fetcher{errors: []error{errors.New("c1")}}, } mx := Merge(fetchers...) // closing more than once should be fine for i := 0; i < 4; i++ { go mx.Close() } mx.Close() mx.Close() if _, err := mx.Consume(); err != ErrMxClosed { t.Fatalf("expected %s, got %s", ErrMxClosed, err) } } type blockingFetcher struct { stop chan struct{} } func (f *blockingFetcher) Consume() (*proto.Message, error) { <-f.stop return nil, errors.New("blocking fetcher is done") } func (f *blockingFetcher) Close() { close(f.stop) } func TestClosingMultiplexerWithBlockingWorkers(t *testing.T) { f1 := &blockingFetcher{make(chan struct{})} defer f1.Close() f2 := &blockingFetcher{make(chan struct{})} defer f2.Close() mx := Merge(f1, f2) // close should be instant - without waiting for workers to finish mx.Close() if _, err := mx.Consume(); err != ErrMxClosed { t.Fatalf("expected %s, got %s", ErrMxClosed, err) } } func TestErrNoDataCloseMultiplexer(t *testing.T) { fetchers := []Consumer{ &fetcher{errors: []error{ErrNoData}}, &fetcher{errors: []error{ErrNoData}, messages: []*proto.Message{{}}}, &fetcher{errors: []error{ErrNoData}}, } mx := Merge(fetchers...) 
if _, err := mx.Consume(); err != nil { t.Fatalf("first consume should succeed, got %s", err) } if _, err := mx.Consume(); err != ErrMxClosed { t.Fatalf("expected %s, got %s", ErrMxClosed, err) } } kafka-2.1.1/v2/proto/000077500000000000000000000000001356004474300142705ustar00rootroot00000000000000kafka-2.1.1/v2/proto/doc.go000066400000000000000000000001231356004474300153600ustar00rootroot00000000000000/* Package proto provides kafka binary protocol implementation. */ package proto kafka-2.1.1/v2/proto/errors.go000066400000000000000000000261131356004474300161360ustar00rootroot00000000000000package proto import ( "fmt" ) var ( ErrUnknown = &KafkaError{-1, "unknown error"} ErrOffsetOutOfRange = &KafkaError{1, "offset out of range"} ErrInvalidMessage = &KafkaError{2, "invalid message"} ErrUnknownTopicOrPartition = &KafkaError{3, "unknown topic or partition"} ErrInvalidMessageSize = &KafkaError{4, "invalid message size"} ErrLeaderNotAvailable = &KafkaError{5, "leader not available"} ErrNotLeaderForPartition = &KafkaError{6, "not leader for partition"} ErrRequestTimeout = &KafkaError{7, "request timeed out"} ErrBrokerNotAvailable = &KafkaError{8, "broker not available"} ErrReplicaNotAvailable = &KafkaError{9, "replica not available"} ErrMessageSizeTooLarge = &KafkaError{10, "message size too large"} ErrScaleControllerEpoch = &KafkaError{11, "scale controller epoch"} ErrOffsetMetadataTooLarge = &KafkaError{12, "offset metadata too large"} ErrNetwork = &KafkaError{13, "server disconnected before response was received"} ErrOffsetLoadInProgress = &KafkaError{14, "offsets load in progress"} ErrNoCoordinator = &KafkaError{15, "consumer coordinator not available"} ErrNotCoordinator = &KafkaError{16, "not coordinator for consumer"} ErrInvalidTopic = &KafkaError{17, "operation on an invalid topic"} ErrRecordListTooLarge = &KafkaError{18, "message batch larger than the configured segment size"} ErrNotEnoughReplicas = &KafkaError{19, "not enough in-sync replicas"} ErrNotEnoughReplicasAfterAppend = &KafkaError{20, "messages are written to the log, but to fewer in-sync replicas than required"} ErrInvalidRequiredAcks = &KafkaError{21, "invalid value for required acks"} ErrIllegalGeneration = &KafkaError{22, "consumer generation id is not valid"} ErrInconsistentPartitionAssignmentStrategy = &KafkaError{23, "partition assignment strategy does not match that of the group"} ErrUnknownParititonAssignmentStrategy = &KafkaError{24, "partition assignment strategy is unknown to the broker"} ErrUnknownConsumerID = &KafkaError{25, "coordinator is not aware of this consumer"} ErrInvalidSessionTimeout = &KafkaError{26, "invalid session timeout"} ErrRebalanceInProgress = &KafkaError{27, "group is rebalancing, so a rejoin is needed"} ErrInvalidCommitOffsetSize = &KafkaError{28, "offset data size is not valid"} ErrTopicAuthorizationFailed = &KafkaError{29, "topic authorization failed"} ErrGroupAuthorizationFailed = &KafkaError{30, "group authorization failed"} ErrClusterAuthorizationFailed = &KafkaError{31, "cluster authorization failed"} ErrInvalidTimeStamp = &KafkaError{32, "timestamp of the message is out of acceptable range"} ErrUnsupportedSaslMechanism = &KafkaError{33, "The broker does not support the requested SASL mechanism."} ErrIllegalSaslState = &KafkaError{34, "Request is not valid given the current SASL state."} ErrUnsupportedVersion = &KafkaError{35, "The version of API is not supported."} ErrTopicAlreadyExists = &KafkaError{36, "Topic with this name already exists."} ErrInvalidPartitions = &KafkaError{37, 
"Number of partitions is invalid."} ErrInvalidReplicationFactor = &KafkaError{38, "Replication-factor is invalid."} ErrInvalidReplicaAssignment = &KafkaError{39, "Replica assignment is invalid."} ErrInvalidConfig = &KafkaError{40, "Configuration is invalid."} ErrNotController = &KafkaError{41, "This is not the correct controller for this cluster."} ErrInvalidRequest = &KafkaError{42, "This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."} ErrUnsupportedForMessageFormat = &KafkaError{43, "The message format version on the broker does not support the request."} ErrPolicyViolation = &KafkaError{44, "Request parameters do not satisfy the configured policy."} ErrOutOfOrderSequenceNumber = &KafkaError{45, "The broker received an out of order sequence number"} ErrDuplicateSequenceNumber = &KafkaError{46, "The broker received a duplicate sequence number"} ErrInvalidProducerEpoch = &KafkaError{47, "Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker."} ErrInvalidTxnState = &KafkaError{48, "The producer attempted a transactional operation in an invalid state"} ErrInvalidProducerIdMapping = &KafkaError{49, "The producer attempted to use a producer id which is not currently assigned to its transactional id"} ErrInvalidTransactionTimeout = &KafkaError{50, "The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms)."} ErrConcurrentTransactions = &KafkaError{51, "The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"} ErrTransactionCoordinatorFenced = &KafkaError{52, "Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"} ErrTransactionalIdAuthorizationFailed = &KafkaError{53, "Transactional Id authorization failed"} ErrSecurityDisabled = &KafkaError{54, "Security features are disabled."} ErrOperationNotAttempted = &KafkaError{55, "The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest."} ErrKafkaStorageError = &KafkaError{56, "Disk error when trying to access log file on the disk."} ErrLogDirNotFound = &KafkaError{57, "The user-specified log directory is not found in the broker config."} ErrSaslAuthenticationFailed = &KafkaError{58, "SASL Authentication failed."} ErrUnknownProducerId = &KafkaError{59, "This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. 
Once the last records of the producerId are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception."} ErrReassignmentInProgress = &KafkaError{60, "A partition reassignment is in progress"} ErrDelegationTokenAuthDisabled = &KafkaError{61, "Delegation Token feature is not enabled."} ErrDelegationTokenNotFound = &KafkaError{62, "Delegation Token is not found on server."} ErrDelegationTokenOwnerMismatch = &KafkaError{63, "Specified Principal is not valid Owner/Renewer."} ErrDelegationTokenRequestNotAllowed = &KafkaError{64, "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."} ErrDelegationTokenAuthorizationFailed = &KafkaError{65, "Delegation Token authorization failed."} ErrDelegationTokenExpired = &KafkaError{66, "Delegation Token is expired."} ErrInvalidPrincipalType = &KafkaError{67, "Supplied principalType is not supported"} ErrNonEmptyGroup = &KafkaError{68, "The group The group is not empty is not empty"} ErrGroupIdNotFound = &KafkaError{69, "The group id The group id does not exist was not found"} ErrFetchSessionIdNotFound = &KafkaError{70, "The fetch session ID was not found"} ErrInvalidFetchSessionEpoch = &KafkaError{71, "The fetch session epoch is invalid"} errnoToErr = map[int16]error{ -1: ErrUnknown, 1: ErrOffsetOutOfRange, 2: ErrInvalidMessage, 3: ErrUnknownTopicOrPartition, 4: ErrInvalidMessageSize, 5: ErrLeaderNotAvailable, 6: ErrNotLeaderForPartition, 7: ErrRequestTimeout, 8: ErrBrokerNotAvailable, 9: ErrReplicaNotAvailable, 10: ErrMessageSizeTooLarge, 11: ErrScaleControllerEpoch, 12: ErrOffsetMetadataTooLarge, 13: ErrNetwork, 14: ErrOffsetLoadInProgress, 15: ErrNoCoordinator, 16: ErrNotCoordinator, 17: ErrInvalidTopic, 18: ErrRecordListTooLarge, 19: ErrNotEnoughReplicas, 20: ErrNotEnoughReplicasAfterAppend, 21: ErrInvalidRequiredAcks, 22: ErrIllegalGeneration, 23: ErrInconsistentPartitionAssignmentStrategy, 24: ErrUnknownParititonAssignmentStrategy, 25: ErrUnknownConsumerID, 26: ErrInvalidSessionTimeout, 27: ErrRebalanceInProgress, 28: ErrInvalidCommitOffsetSize, 29: ErrTopicAuthorizationFailed, 30: ErrGroupAuthorizationFailed, 31: ErrClusterAuthorizationFailed, 32: ErrInvalidCommitOffsetSize, 33: ErrUnsupportedSaslMechanism, 34: ErrIllegalSaslState, 35: ErrUnsupportedVersion, 36: ErrTopicAlreadyExists, 37: ErrInvalidPartitions, 38: ErrInvalidReplicationFactor, 39: ErrInvalidReplicaAssignment, 40: ErrInvalidConfig, 41: ErrNotController, 42: ErrInvalidRequest, 43: ErrUnsupportedForMessageFormat, 44: ErrPolicyViolation, 45: ErrOutOfOrderSequenceNumber, 46: ErrDuplicateSequenceNumber, 47: ErrInvalidProducerEpoch, 48: ErrInvalidTxnState, 49: ErrInvalidProducerIdMapping, 50: ErrInvalidTransactionTimeout, 51: ErrConcurrentTransactions, 52: ErrTransactionCoordinatorFenced, 53: ErrTransactionalIdAuthorizationFailed, 54: ErrSecurityDisabled, 55: ErrOperationNotAttempted, 56: ErrKafkaStorageError, 57: ErrLogDirNotFound, 58: ErrSaslAuthenticationFailed, 59: ErrUnknownProducerId, 60: ErrReassignmentInProgress, 61: ErrDelegationTokenAuthDisabled, 62: ErrDelegationTokenNotFound, 63: ErrDelegationTokenOwnerMismatch, 64: ErrDelegationTokenRequestNotAllowed, 65: ErrDelegationTokenAuthorizationFailed, 66: ErrDelegationTokenExpired, 67: ErrInvalidPrincipalType, 68: ErrNonEmptyGroup, 69: ErrGroupIdNotFound, 70: ErrFetchSessionIdNotFound, 71: ErrInvalidFetchSessionEpoch, } ) type KafkaError struct { errno int16 message string } func (err *KafkaError) Error() 
string { return fmt.Sprintf("%s (%d)", err.message, err.errno) } func (err *KafkaError) Errno() int { return int(err.errno) } func errFromNo(errno int16) error { if errno == 0 { return nil } err, ok := errnoToErr[errno] if !ok { return fmt.Errorf("unknown kafka error %d", errno) } return err } kafka-2.1.1/v2/proto/messages.go000066400000000000000000001641141356004474300164350ustar00rootroot00000000000000package proto import ( "bufio" "bytes" "compress/gzip" "encoding/binary" "errors" "fmt" "hash/crc32" "io" "io/ioutil" "time" "github.com/golang/snappy" ) /* Kafka wire protocol implemented as described in http://kafka.apache.org/protocol.html */ const ( KafkaV0 int16 = iota KafkaV1 KafkaV2 KafkaV3 KafkaV4 KafkaV5 ) const ( ProduceReqKind = 0 FetchReqKind = 1 OffsetReqKind = 2 MetadataReqKind = 3 OffsetCommitReqKind = 8 OffsetFetchReqKind = 9 ConsumerMetadataReqKind = 10 APIVersionsReqKind = 18 CreateTopicsReqKind = 19 ) const ( // receive the latest offset (i.e. the offset of the next coming message) OffsetReqTimeLatest = -1 // receive the earliest available offset. Note that because offsets are // pulled in descending order, asking for the earliest offset will always // return you a single element. OffsetReqTimeEarliest = -2 // Server will not send any response. RequiredAcksNone = 0 // Server will block until the message is committed by all in sync replicas // before sending a response. RequiredAcksAll = -1 // Server will wait the data is written to the local log before sending a // response. RequiredAcksLocal = 1 ) type Request interface { Kind() int16 GetHeader() *RequestHeader GetVersion() int16 GetCorrelationID() int32 GetClientID() string SetClientID(cliendID string) io.WriterTo Bytes() ([]byte, error) } var _ Request = &ProduceReq{} var _ Request = &FetchReq{} var _ Request = &OffsetReq{} var _ Request = &MetadataReq{} var _ Request = &OffsetCommitReq{} var _ Request = &OffsetFetchReq{} var _ Request = &ConsumerMetadataReq{} var _ Request = &APIVersionsReq{} var _ Request = &CreateTopicsReq{} func SetVersion(header *RequestHeader, version int16) { header.version = version } func SetCorrelationID(header *RequestHeader, correlationID int32) { header.correlationID = correlationID } type RequestHeader struct { version int16 correlationID int32 ClientID string } func (h *RequestHeader) GetHeader() *RequestHeader { return h } func (h *RequestHeader) GetVersion() int16 { return h.version } func (h *RequestHeader) GetCorrelationID() int32 { return h.correlationID } func (h *RequestHeader) GetClientID() string { return h.ClientID } func (h *RequestHeader) SetClientID(cliendID string) { h.ClientID = cliendID } var SupportedByDriver = map[int16]SupportedVersion{ ProduceReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV2}, FetchReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV5}, OffsetReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV2}, MetadataReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV5}, OffsetCommitReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV3}, OffsetFetchReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV3}, ConsumerMetadataReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV1}, APIVersionsReqKind: SupportedVersion{MinVersion: KafkaV0, MaxVersion: KafkaV1}, } type Compression int8 const ( CompressionNone Compression = 0 CompressionGzip Compression = 1 CompressionSnappy Compression = 2 ) // ParserConfig is optional configuration for the parser. 
It can be configured via // SetParserConfig type ParserConfig struct { // SimplifiedMessageSetParsing enables a simplified version of the // MessageSet parser which will not split MessageSet into slices of // Message structures. Instead, the entire MessageSet will be read // over. This mode improves parsing speed due to reduce memory read at // the cost of not providing access to the message payload after // parsing. SimplifiedMessageSetParsing bool } var ( conf ParserConfig ) // ConfigureParser configures the parser. It must be called prior to parsing // any messages as the structure is currently not prepared for concurrent // access. func ConfigureParser(c ParserConfig) error { conf = c return nil } func boolToInt8(val bool) int8 { res := int8(0) if val { res = 1 } return res } // ReadReq returns request kind ID and byte representation of the whole message // in wire protocol format. func ReadReq(r io.Reader) (requestKind int16, b []byte, err error) { dec := NewDecoder(r) msgSize := dec.DecodeInt32() requestKind = dec.DecodeInt16() if err := dec.Err(); err != nil { return 0, nil, err } // size of the message + size of the message itself b, err = allocParseBuf(int(msgSize + 4)) if err != nil { return 0, nil, err } binary.BigEndian.PutUint32(b, uint32(msgSize)) // only write back requestKind if it was included in messageSize if len(b) >= 6 { binary.BigEndian.PutUint16(b[4:], uint16(requestKind)) } // read rest of request into allocated buffer if we allocated for it if len(b) > 6 { if _, err := io.ReadFull(r, b[6:]); err != nil { return 0, nil, err } } return requestKind, b, nil } // ReadResp returns message correlation ID and byte representation of the whole // message in wire protocol that is returned when reading from given stream, // including 4 bytes of message size itself. // Byte representation returned by ReadResp can be parsed by all response // reeaders to transform it into specialized response structure. func ReadResp(r io.Reader) (correlationID int32, b []byte, err error) { dec := NewDecoder(r) msgSize := dec.DecodeInt32() correlationID = dec.DecodeInt32() if err := dec.Err(); err != nil { return 0, nil, err } // size of the message + size of the message itself b, err = allocParseBuf(int(msgSize + 4)) if err != nil { return 0, nil, err } binary.BigEndian.PutUint32(b, uint32(msgSize)) binary.BigEndian.PutUint32(b[4:], uint32(correlationID)) _, err = io.ReadFull(r, b[8:]) return correlationID, b, err } // Message represents single entity of message set. type Message struct { Key []byte Value []byte Offset int64 // set when fetching and after successful producing Crc uint32 // set when fetching, ignored when producing Topic string // set when fetching, ignored when producing Partition int32 // set when fetching, ignored when producing TipOffset int64 // set when fetching, ignored when processing } // ComputeCrc returns crc32 hash for given message content. func ComputeCrc(m *Message, compression Compression) uint32 { var buf bytes.Buffer enc := NewEncoder(&buf) enc.EncodeInt8(0) // magic byte is always 0 enc.EncodeInt8(int8(compression)) enc.EncodeBytes(m.Key) enc.EncodeBytes(m.Value) return crc32.ChecksumIEEE(buf.Bytes()) } // writeMessageSet writes a Message Set into w. // It returns the number of bytes written and any error. 
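//
// For reference, each uncompressed entry produced below is laid out as
// follows (sizes derived from the 26- and 14-byte constants used in the code):
//
//	offset       int64
//	message size int32  // 14 + len(key) + len(value), counted from the crc field
//	crc          uint32 // over magic, attributes, key and value
//	magic        int8   // always 0 here
//	attributes   int8   // compression codec in the lowest bits
//	key          bytes  // int32 length prefix
//	value        bytes  // int32 length prefix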
func writeMessageSet(w io.Writer, messages []*Message, compression Compression) (int, error) { if len(messages) == 0 { return 0, nil } // NOTE(caleb): it doesn't appear to be documented, but I observed that the // Java client sets the offset of the synthesized message set for a group of // compressed messages to be the offset of the last message in the set. compressOffset := messages[len(messages)-1].Offset switch compression { case CompressionGzip: var buf bytes.Buffer gz := gzip.NewWriter(&buf) if _, err := writeMessageSet(gz, messages, CompressionNone); err != nil { return 0, err } if err := gz.Close(); err != nil { return 0, err } messages = []*Message{ { Value: buf.Bytes(), Offset: compressOffset, }, } case CompressionSnappy: var buf bytes.Buffer if _, err := writeMessageSet(&buf, messages, CompressionNone); err != nil { return 0, err } messages = []*Message{ { Value: snappy.Encode(nil, buf.Bytes()), Offset: compressOffset, }, } } totalSize := 0 b, err := newSliceWriter(0) if err != nil { return 0, err } for _, message := range messages { bsize := 26 + len(message.Key) + len(message.Value) if err := b.Reset(bsize); err != nil { return 0, err } enc := NewEncoder(b) enc.EncodeInt64(message.Offset) msize := int32(14 + len(message.Key) + len(message.Value)) enc.EncodeInt32(msize) enc.EncodeUint32(0) // crc32 placeholder enc.EncodeInt8(0) // magic byte enc.EncodeInt8(int8(compression)) enc.EncodeBytes(message.Key) enc.EncodeBytes(message.Value) if err := enc.Err(); err != nil { return totalSize, err } const hsize = 8 + 4 + 4 // offset + message size + crc32 const crcoff = 8 + 4 // offset + message size binary.BigEndian.PutUint32(b.buf[crcoff:crcoff+4], crc32.ChecksumIEEE(b.buf[hsize:bsize])) if n, err := w.Write(b.Slice()); err != nil { return totalSize, err } else { totalSize += n } } return totalSize, nil } type slicewriter struct { buf []byte pos int size int } func newSliceWriter(bufsize int) (*slicewriter, error) { buf, err := allocParseBuf(bufsize) if err != nil { return nil, err } return &slicewriter{ buf: buf, pos: 0, }, nil } func (w *slicewriter) Write(p []byte) (int, error) { if len(w.buf) < w.pos+len(p) { return 0, errors.New("buffer too small") } copy(w.buf[w.pos:], p) w.pos += len(p) return len(p), nil } func (w *slicewriter) Reset(size int) error { if size > len(w.buf) { var err error w.buf, err = allocParseBuf(size + 1000) // allocate a bit more than required if err != nil { return err } } w.size = size w.pos = 0 return nil } func (w *slicewriter) Slice() []byte { return w.buf[:w.pos] } // readRecordBatch reasd and return record batch from the stream // RecordBatch replace MessageSet for kafka >= 0.11 // Because kafka is sending message set directly from the drive, it might cut // off part of the last message. This also means that the last message can be // shorter than the header is saying. In such case just ignore the last // malformed message from the set and returned earlier data. func readRecordBatch(r io.Reader) (*RecordBatch, error) { dec := NewDecoder(r) rb := &RecordBatch{} rb.FirstOffset = dec.DecodeInt64() rb.Length = dec.DecodeInt32() rb.PartitionLeaderEpoch = dec.DecodeInt32() // Magic byte. It represents a version of a message. // But we've already determinated that this is a record batch // and since there is only one version of record batch exists, we can just ignore it. 
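// The CRC field decoded below is CRC-32C (Castagnoli) and covers everything
// from the attributes field to the end of the batch; that is why the reader
// is wrapped with a TeeReader only after the CRC itself has been read.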
_ = dec.DecodeInt8() rb.CRC = dec.DecodeInt32() crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) r = io.TeeReader(r, crc) dec.SetReader(r) rb.Attributes = dec.DecodeInt16() rb.LastOffsetDelta = dec.DecodeInt32() rb.FirstTimestamp = dec.DecodeInt64() rb.MaxTimestamp = dec.DecodeInt64() rb.ProducerId = dec.DecodeInt64() rb.ProducerEpoch = dec.DecodeInt16() rb.FirstSequence = dec.DecodeInt32() slen, err := dec.DecodeArrayLen() if err != nil { return nil, err } switch rb.Compression() { case CompressionNone: break case CompressionGzip: r, err = gzip.NewReader(r) if err != nil { return nil, err } allUnzipped, err := ioutil.ReadAll(r) if err != nil { return nil, err } r = bytes.NewReader(allUnzipped) dec.SetReader(r) case CompressionSnappy: var err error val, err := ioutil.ReadAll(r) if err != nil { return nil, err } decoded, err := snappyDecode(val) if err != nil { return nil, err } r = bytes.NewReader(decoded) dec.SetReader(r) default: return nil, errors.New("Unknown compression") } if dec.Err() != nil { return nil, dec.Err() } rb.Records = make([]*Record, 0, slen) for i := 0; i < slen; i++ { rec, err := readRecord(dec) if err != nil { return nil, err } rb.Records = append(rb.Records, rec) } if uint32(rb.CRC) != crc.Sum32() { return nil, fmt.Errorf("Wrong CRC32") } return rb, nil } func readRecord(dec *decoder) (*Record, error) { rec := &Record{} rec.Length = dec.DecodeVarInt() rec.Attributes = dec.DecodeInt8() rec.TimestampDelta = dec.DecodeVarInt() rec.OffsetDelta = dec.DecodeVarInt() rec.Key = dec.DecodeVarBytes() rec.Value = dec.DecodeVarBytes() headersLen := dec.DecodeVarInt() rec.Headers = make([]RecordHeader, headersLen) for i := range rec.Headers { rec.Headers[i].Key = dec.DecodeVarString() rec.Headers[i].Value = dec.DecodeVarBytes() } return rec, dec.Err() } // readMessageSet reads and return messages from the stream. // The size is known before a message set is decoded. // Because kafka is sending message set directly from the drive, it might cut // off part of the last message. This also means that the last message can be // shorter than the header is saying. In such case just ignore the last // malformed message from the set and returned earlier data. 
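// Gzip- and snappy-compressed entries are transparently decompressed and the
// wrapped inner message set is expanded into the returned slice.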
func readMessageSet(r io.Reader, size int32) ([]*Message, error) { if size < 0 || size > maxParseBufSize { return nil, messageSizeError(int(size)) } if conf.SimplifiedMessageSetParsing { msgbuf, err := allocParseBuf(int(size)) if err != nil { return nil, err } if _, err := io.ReadFull(r, msgbuf); err != nil { return nil, err } return make([]*Message, 0, 0), nil } dec := NewDecoder(r) set := make([]*Message, 0, 256) for { offset := dec.DecodeInt64() if err := dec.Err(); err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { return set, nil } return nil, err } // single message size size := dec.DecodeInt32() if err := dec.Err(); err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { return set, nil } return nil, err } // Skip over empty messages if size <= int32(0) { return set, nil } msgbuf, err := allocParseBuf(int(size)) if err != nil { return nil, err } if _, err := io.ReadFull(r, msgbuf); err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { return set, nil } return nil, err } msgdec := NewDecoder(bytes.NewBuffer(msgbuf)) msg := &Message{ Offset: offset, Crc: msgdec.DecodeUint32(), } // MessageSet with no payload if size <= int32(4) { set = append(set, msg) return set, nil } if msg.Crc != crc32.ChecksumIEEE(msgbuf[4:]) { // ignore this message and because we want to have constant // history, do not process anything more return set, nil } // magic byte messageVersion := MessageVersion(msgdec.DecodeInt8()) attributes := msgdec.DecodeInt8() if messageVersion == MessageV1 { // timestamp _ = msgdec.DecodeInt64() } switch compression := Compression(attributes & 3); compression { case CompressionNone: msg.Key = msgdec.DecodeBytes() msg.Value = msgdec.DecodeBytes() if err := msgdec.Err(); err != nil { return nil, err } set = append(set, msg) case CompressionGzip, CompressionSnappy: _ = msgdec.DecodeBytes() // ignore key val := msgdec.DecodeBytes() if err := msgdec.Err(); err != nil { return nil, err } var decoded []byte switch compression { case CompressionGzip: cr, err := gzip.NewReader(bytes.NewReader(val)) if err != nil { return nil, err } decoded, err = ioutil.ReadAll(cr) if err != nil { return nil, err } _ = cr.Close() case CompressionSnappy: var err error decoded, err = snappyDecode(val) if err != nil { return nil, err } } msgs, err := readMessageSet(bytes.NewReader(decoded), int32(len(decoded))) if err != nil { return nil, err } set = append(set, msgs...) 
default: return nil, err } } } func encodeHeader(e *encoder, r Request) { // message size - for now just placeholder e.EncodeInt32(0) e.EncodeInt16(r.Kind()) e.EncodeInt16(r.GetVersion()) e.EncodeInt32(r.GetCorrelationID()) e.EncodeString(r.GetClientID()) } func decodeHeader(dec *decoder, req Request) { // total message size _ = dec.DecodeInt32() // api key _ = dec.DecodeInt16() SetVersion(req.GetHeader(), dec.DecodeInt16()) SetCorrelationID(req.GetHeader(), dec.DecodeInt32()) req.SetClientID(dec.DecodeString()) } type MetadataReq struct { RequestHeader Topics []string AllowAutoTopicCreation bool // >= KafkaV4 only } func ReadMetadataReq(r io.Reader) (*MetadataReq, error) { var req MetadataReq dec := NewDecoder(r) decodeHeader(dec, &req) len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.Topics = make([]string, len) for i := range req.Topics { req.Topics[i] = dec.DecodeString() } if req.version >= KafkaV4 { req.AllowAutoTopicCreation = dec.DecodeInt8() != 0 } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r MetadataReq) Kind() int16 { return MetadataReqKind } func (r *MetadataReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) if len(r.Topics) == 0 { if r.version >= 1 { enc.EncodeArrayLen(-1) } else { enc.EncodeArrayLen(0) } } else { enc.EncodeArrayLen(len(r.Topics)) } for _, name := range r.Topics { enc.EncodeString(name) } if r.version >= KafkaV4 { enc.EncodeInt8(boolToInt8(r.AllowAutoTopicCreation)) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *MetadataReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type MetadataResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration // >= KafkaV3 Brokers []MetadataRespBroker ClusterID string // >= KafkaV2 ControllerID int32 // >= KafkaV1 Topics []MetadataRespTopic } type MetadataRespBroker struct { NodeID int32 Host string Port int32 Rack string // >= KafkaV1 } type MetadataRespTopic struct { Name string Err error IsInternal bool // >= KafkaV1 Partitions []MetadataRespPartition } type MetadataRespPartition struct { Err error ID int32 Leader int32 Replicas []int32 Isrs []int32 OfflineReplicas []int32 } func (r *MetadataResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV3 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.Brokers)) for _, broker := range r.Brokers { enc.EncodeInt32(broker.NodeID) enc.EncodeString(broker.Host) enc.EncodeInt32(broker.Port) if r.Version >= KafkaV1 { enc.EncodeString(broker.Rack) } } if r.Version >= KafkaV2 { enc.EncodeString(r.ClusterID) } if r.Version >= KafkaV1 { enc.EncodeInt32(r.ControllerID) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeError(topic.Err) enc.EncodeString(topic.Name) if r.Version >= KafkaV1 { enc.EncodeInt8(boolToInt8(topic.IsInternal)) } enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeError(part.Err) enc.EncodeInt32(part.ID) enc.EncodeInt32(part.Leader) enc.EncodeInt32s(part.Replicas) enc.EncodeInt32s(part.Isrs) if r.Version >= KafkaV5 { enc.EncodeInt32s(part.OfflineReplicas) } } } if enc.Err() != nil { return nil, enc.Err() } // update the message 
size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func ReadMetadataResp(r io.Reader) (*MetadataResp, error) { return ReadVersionedMetadataResp(r, KafkaV0) } func ReadVersionedMetadataResp(r io.Reader, version int16) (*MetadataResp, error) { var resp MetadataResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if resp.Version >= KafkaV3 { resp.ThrottleTime = dec.DecodeDuration32() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Brokers = make([]MetadataRespBroker, len) for i := range resp.Brokers { var b = &resp.Brokers[i] b.NodeID = dec.DecodeInt32() b.Host = dec.DecodeString() b.Port = dec.DecodeInt32() if resp.Version >= KafkaV1 { b.Rack = dec.DecodeString() } } if resp.Version >= KafkaV2 { resp.ClusterID = dec.DecodeString() } if resp.Version >= KafkaV1 { resp.ControllerID = dec.DecodeInt32() } len, err = dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]MetadataRespTopic, len) for ti := range resp.Topics { var t = &resp.Topics[ti] t.Err = errFromNo(dec.DecodeInt16()) t.Name = dec.DecodeString() if resp.Version >= KafkaV1 { t.IsInternal = (dec.DecodeInt8() == 1) } len, err = dec.DecodeArrayLen() if err != nil { return nil, err } t.Partitions = make([]MetadataRespPartition, len) for pi := range t.Partitions { var p = &t.Partitions[pi] p.Err = errFromNo(dec.DecodeInt16()) p.ID = dec.DecodeInt32() p.Leader = dec.DecodeInt32() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } p.Replicas = make([]int32, len) for ri := range p.Replicas { p.Replicas[ri] = dec.DecodeInt32() } len, err = dec.DecodeArrayLen() if err != nil { return nil, err } p.Isrs = make([]int32, len) for ii := range p.Isrs { p.Isrs[ii] = dec.DecodeInt32() } if resp.Version >= KafkaV5 { len, err = dec.DecodeArrayLen() if err != nil { return nil, err } p.OfflineReplicas = make([]int32, len) for ii := range p.OfflineReplicas { p.OfflineReplicas[ii] = dec.DecodeInt32() } } } } if dec.Err() != nil { return nil, dec.Err() } return &resp, nil } type FetchReq struct { RequestHeader ReplicaID int32 MaxWaitTime time.Duration MinBytes int32 MaxBytes int32 // >= KafkaV3 IsolationLevel int8 // >= KafkaV4 Topics []FetchReqTopic } type FetchReqTopic struct { Name string Partitions []FetchReqPartition } type FetchReqPartition struct { ID int32 FetchOffset int64 LogStartOffset int64 // >= KafkaV5 MaxBytes int32 } func ReadFetchReq(r io.Reader) (*FetchReq, error) { var req FetchReq dec := NewDecoder(r) decodeHeader(dec, &req) req.ReplicaID = dec.DecodeInt32() req.MaxWaitTime = dec.DecodeDuration32() req.MinBytes = dec.DecodeInt32() if req.version >= KafkaV3 { req.MaxBytes = dec.DecodeInt32() } if req.version >= KafkaV4 { req.IsolationLevel = dec.DecodeInt8() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.Topics = make([]FetchReqTopic, len) for ti := range req.Topics { var topic = &req.Topics[ti] topic.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]FetchReqPartition, len) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = dec.DecodeInt32() part.FetchOffset = dec.DecodeInt64() if req.version >= KafkaV5 { part.LogStartOffset = dec.DecodeInt64() } part.MaxBytes = dec.DecodeInt32() } } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r FetchReq) Kind() int16 { return FetchReqKind } func (r *FetchReq) Bytes() 
([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) //enc.Encode(r.ReplicaID) enc.EncodeInt32(-1) enc.EncodeDuration(r.MaxWaitTime) enc.EncodeInt32(r.MinBytes) if r.version >= KafkaV3 { enc.EncodeInt32(r.MaxBytes) } if r.version >= KafkaV4 { enc.EncodeInt8(r.IsolationLevel) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeInt64(part.FetchOffset) if r.version >= KafkaV5 { enc.EncodeInt64(part.LogStartOffset) } enc.EncodeInt32(part.MaxBytes) } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *FetchReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type FetchResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration Topics []FetchRespTopic } type FetchRespTopic struct { Name string Partitions []FetchRespPartition } // Message version define which format of messages // is using in this particular Produce/Response // MessageV0 and MessageV1 indicate usage of MessageSet // MessageV3 indicate usage of RecordBatch // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets type MessageVersion int8 const MessageV0 MessageVersion = 0 const MessageV1 MessageVersion = 1 const MessageV2 MessageVersion = 2 type FetchRespPartition struct { ID int32 Err error TipOffset int64 LastStableOffset int64 LogStartOffset int64 AbortedTransactions []FetchRespAbortedTransaction Messages []*Message MessageVersion MessageVersion RecordBatches []*RecordBatch } type FetchRespAbortedTransaction struct { ProducerID int64 FirstOffset int64 } type RecordBatch struct { FirstOffset int64 Length int32 PartitionLeaderEpoch int32 Magic int8 CRC int32 Attributes int16 LastOffsetDelta int32 FirstTimestamp int64 MaxTimestamp int64 ProducerId int64 ProducerEpoch int16 FirstSequence int32 Records []*Record } type Record struct { Length int64 Attributes int8 TimestampDelta int64 OffsetDelta int64 Key []byte Value []byte Headers []RecordHeader } type RecordHeader struct { Key string Value []byte } func (rb *RecordBatch) Compression() Compression { return Compression(rb.Attributes & 3) } func (r *FetchResp) Bytes() ([]byte, error) { var buf buffer enc := NewEncoder(&buf) enc.EncodeInt32(0) // placeholder enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV1 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeError(part.Err) enc.EncodeInt64(part.TipOffset) if r.Version >= KafkaV4 { enc.EncodeInt64(part.LastStableOffset) if r.Version >= KafkaV5 { enc.EncodeInt64(part.LogStartOffset) } enc.EncodeArrayLen(len(part.AbortedTransactions)) for _, trans := range part.AbortedTransactions { enc.EncodeInt64(trans.ProducerID) enc.EncodeInt64(trans.FirstOffset) } } i := len(buf) enc.EncodeInt32(0) // placeholder // NOTE(caleb): writing compressed fetch response isn't implemented // for now, since that's not needed for clients. 
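// The int32 placeholder written at index i above is back-patched below with
// the byte size of the message set once it has been written out.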
n, err := writeMessageSet(&buf, part.Messages, CompressionNone) if err != nil { return nil, err } binary.BigEndian.PutUint32(buf[i:i+4], uint32(n)) } } if enc.Err() != nil { return nil, enc.Err() } binary.BigEndian.PutUint32(buf[:4], uint32(len(buf)-4)) return []byte(buf), nil } func ReadFetchResp(r io.Reader) (*FetchResp, error) { return ReadVersionedFetchResp(r, KafkaV0) } func ReadVersionedFetchResp(r io.Reader, version int16) (*FetchResp, error) { var err error var resp FetchResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if resp.Version >= KafkaV1 { resp.ThrottleTime = dec.DecodeDuration32() } numTopics, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]FetchRespTopic, numTopics) for ti := range resp.Topics { var topic = &resp.Topics[ti] topic.Name = dec.DecodeString() numPartitions, err := dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]FetchRespPartition, numPartitions) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = dec.DecodeInt32() part.Err = errFromNo(dec.DecodeInt16()) part.TipOffset = dec.DecodeInt64() if resp.Version >= KafkaV4 { part.LastStableOffset = dec.DecodeInt64() if resp.Version >= KafkaV5 { part.LogStartOffset = dec.DecodeInt64() } numAbortedTransactions, err := dec.DecodeArrayLen() if err != nil { return nil, err } part.AbortedTransactions = make([]FetchRespAbortedTransaction, numAbortedTransactions) for i := range part.AbortedTransactions { part.AbortedTransactions[i].ProducerID = dec.DecodeInt64() part.AbortedTransactions[i].FirstOffset = dec.DecodeInt64() } } if dec.Err() != nil { return nil, dec.Err() } msgSetSize := dec.DecodeInt32() if dec.Err() != nil { return nil, dec.Err() } br := bufio.NewReader(io.LimitReader(r, int64(msgSetSize))) for { // try to figure out what is next - MessageSet or RecordBatch b, err := br.Peek(17) if err == io.EOF { break } if err != nil { return nil, err } part.MessageVersion = MessageVersion(int8(b[16])) if part.MessageVersion < MessageV2 { // Response contains MessageSet if part.Messages, err = readMessageSet(br, msgSetSize); err != nil { return nil, err } for _, msg := range part.Messages { msg.Topic = topic.Name msg.Partition = part.ID msg.TipOffset = part.TipOffset } } else if part.MessageVersion == MessageV2 { // Response contains RecordBatch batch, err := readRecordBatch(br) if (err == ErrNotEnoughData || err == io.EOF || err == io.ErrUnexpectedEOF) && len(part.RecordBatches) > 0 { // it was partial batch so we just ignore it break } if err != nil { return nil, err } part.RecordBatches = append(part.RecordBatches, batch) } else { return nil, errors.New("Incorrect message byte") } } } } if dec.Err() != nil { return nil, dec.Err() } return &resp, nil } const ( CorrelationTypeGroup int8 = 0 CorrelationTypeTransaction = 1 ) type ConsumerMetadataReq struct { RequestHeader ConsumerGroup string CoordinatorType int8 // >= KafkaV1 } func ReadConsumerMetadataReq(r io.Reader) (*ConsumerMetadataReq, error) { var req ConsumerMetadataReq dec := NewDecoder(r) decodeHeader(dec, &req) req.ConsumerGroup = dec.DecodeString() if req.version >= KafkaV1 { req.CoordinatorType = dec.DecodeInt8() } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r ConsumerMetadataReq) Kind() int16 { return ConsumerMetadataReqKind } func (r *ConsumerMetadataReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) 
enc.EncodeString(r.ConsumerGroup) if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *ConsumerMetadataReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type ConsumerMetadataResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration // >= KafkaV1 Err error ErrMsg string // >= KafkaV1 CoordinatorID int32 CoordinatorHost string CoordinatorPort int32 } func ReadConsumerMetadataResp(r io.Reader) (*ConsumerMetadataResp, error) { return ReadVersionedConsumerMetadataResp(r, KafkaV0) } func ReadVersionedConsumerMetadataResp(r io.Reader, version int16) (*ConsumerMetadataResp, error) { var resp ConsumerMetadataResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if version >= KafkaV1 { resp.ThrottleTime = dec.DecodeDuration32() } resp.Err = errFromNo(dec.DecodeInt16()) if version >= KafkaV1 { resp.ErrMsg = dec.DecodeString() } resp.CoordinatorID = dec.DecodeInt32() resp.CoordinatorHost = dec.DecodeString() resp.CoordinatorPort = dec.DecodeInt32() if err := dec.Err(); err != nil { return nil, err } return &resp, nil } func (r *ConsumerMetadataResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV1 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeError(r.Err) if r.Version >= KafkaV1 { enc.EncodeString(r.ErrMsg) } enc.EncodeInt32(r.CoordinatorID) enc.EncodeString(r.CoordinatorHost) enc.EncodeInt32(r.CoordinatorPort) if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } type OffsetCommitReq struct { RequestHeader ConsumerGroup string GroupGenerationID int32 // >= KafkaV1 only MemberID string // >= KafkaV1 only RetentionTime int64 // >= KafkaV2 only Topics []OffsetCommitReqTopic } type OffsetCommitReqTopic struct { Name string Partitions []OffsetCommitReqPartition } type OffsetCommitReqPartition struct { ID int32 Offset int64 TimeStamp time.Time // == KafkaV1 only Metadata string } func ReadOffsetCommitReq(r io.Reader) (*OffsetCommitReq, error) { var req OffsetCommitReq dec := NewDecoder(r) decodeHeader(dec, &req) req.ConsumerGroup = dec.DecodeString() if req.version >= KafkaV1 { req.GroupGenerationID = dec.DecodeInt32() req.MemberID = dec.DecodeString() } if req.version >= KafkaV2 { req.RetentionTime = dec.DecodeInt64() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.Topics = make([]OffsetCommitReqTopic, len) for ti := range req.Topics { var topic = &req.Topics[ti] topic.Name = dec.DecodeString() len, err := dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]OffsetCommitReqPartition, len) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = dec.DecodeInt32() part.Offset = dec.DecodeInt64() if req.version == KafkaV1 { part.TimeStamp = time.Unix(0, dec.DecodeInt64()*int64(time.Millisecond)) } part.Metadata = dec.DecodeString() } } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r OffsetCommitReq) Kind() int16 { return OffsetCommitReqKind } func (r *OffsetCommitReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) 
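// Past the common header the layout is version dependent: KafkaV1 adds the
// group generation ID and member ID, KafkaV2 adds the retention time, and the
// per-partition timestamp is only written for exactly KafkaV1. A minimal,
// illustrative request (group, topic and offset values are made up):
//
//	req := &OffsetCommitReq{
//		RequestHeader: RequestHeader{ClientID: "example-client"},
//		ConsumerGroup: "example-group",
//		Topics: []OffsetCommitReqTopic{{
//			Name:       "example-topic",
//			Partitions: []OffsetCommitReqPartition{{ID: 0, Offset: 42, Metadata: "checkpoint"}},
//		}},
//	}
//	b, err := req.Bytes()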
enc.EncodeString(r.ConsumerGroup) if r.version >= KafkaV1 { enc.EncodeInt32(r.GroupGenerationID) enc.EncodeString(r.MemberID) } if r.version >= KafkaV2 { enc.EncodeInt64(r.RetentionTime) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeInt64(part.Offset) if r.version == KafkaV1 { // TODO(husio) is this really in milliseconds? enc.EncodeInt64(part.TimeStamp.UnixNano() / int64(time.Millisecond)) } enc.EncodeString(part.Metadata) } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *OffsetCommitReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type OffsetCommitResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration // >= KafkaV3 only Topics []OffsetCommitRespTopic } type OffsetCommitRespTopic struct { Name string Partitions []OffsetCommitRespPartition } type OffsetCommitRespPartition struct { ID int32 Err error } func ReadOffsetCommitResp(r io.Reader) (*OffsetCommitResp, error) { return ReadVersionedOffsetCommitResp(r, KafkaV0) } func ReadVersionedOffsetCommitResp(r io.Reader, version int16) (*OffsetCommitResp, error) { var resp OffsetCommitResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if version >= KafkaV3 { resp.ThrottleTime = dec.DecodeDuration32() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]OffsetCommitRespTopic, len) for ti := range resp.Topics { var t = &resp.Topics[ti] t.Name = dec.DecodeString() len, err := dec.DecodeArrayLen() if err != nil { return nil, err } t.Partitions = make([]OffsetCommitRespPartition, len) for pi := range t.Partitions { var p = &t.Partitions[pi] p.ID = dec.DecodeInt32() p.Err = errFromNo(dec.DecodeInt16()) } } if err := dec.Err(); err != nil { return nil, err } return &resp, nil } func (r *OffsetCommitResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV3 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.Topics)) for _, t := range r.Topics { enc.EncodeString(t.Name) enc.EncodeArrayLen(len(t.Partitions)) for _, p := range t.Partitions { enc.EncodeInt32(p.ID) enc.EncodeError(p.Err) } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } type OffsetFetchReq struct { RequestHeader ConsumerGroup string Topics []OffsetFetchReqTopic } type OffsetFetchReqTopic struct { Name string Partitions []int32 } func ReadOffsetFetchReq(r io.Reader) (*OffsetFetchReq, error) { var req OffsetFetchReq dec := NewDecoder(r) decodeHeader(dec, &req) req.ConsumerGroup = dec.DecodeString() len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.Topics = make([]OffsetFetchReqTopic, len) for ti := range req.Topics { var topic = &req.Topics[ti] topic.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]int32, len) for pi := range topic.Partitions { topic.Partitions[pi] = dec.DecodeInt32() } } if dec.Err() != nil { return nil, 
dec.Err() } return &req, nil } func (r OffsetFetchReq) Kind() int16 { return OffsetFetchReqKind } func (r *OffsetFetchReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) enc.EncodeString(r.ConsumerGroup) enc.EncodeArrayLen(len(r.Topics)) for _, t := range r.Topics { enc.EncodeString(t.Name) enc.EncodeInt32s(t.Partitions) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *OffsetFetchReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type OffsetFetchResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration // >= KafkaV3 Topics []OffsetFetchRespTopic Err error // >= KafkaV2 } type OffsetFetchRespTopic struct { Name string Partitions []OffsetFetchRespPartition } type OffsetFetchRespPartition struct { ID int32 Offset int64 Metadata string Err error } func ReadOffsetFetchResp(r io.Reader) (*OffsetFetchResp, error) { return ReadVersionedOffsetFetchResp(r, KafkaV0) } func ReadVersionedOffsetFetchResp(r io.Reader, version int16) (*OffsetFetchResp, error) { var resp OffsetFetchResp dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() resp.Version = version if version >= KafkaV3 { resp.ThrottleTime = dec.DecodeDuration32() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]OffsetFetchRespTopic, len) for ti := range resp.Topics { var t = &resp.Topics[ti] t.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } t.Partitions = make([]OffsetFetchRespPartition, len) for pi := range t.Partitions { var p = &t.Partitions[pi] p.ID = dec.DecodeInt32() p.Offset = dec.DecodeInt64() p.Metadata = dec.DecodeString() p.Err = errFromNo(dec.DecodeInt16()) } } if version >= KafkaV2 { resp.Err = errFromNo(dec.DecodeInt16()) } if err := dec.Err(); err != nil { return nil, err } return &resp, nil } func (r *OffsetFetchResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV3 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeInt64(part.Offset) enc.EncodeString(part.Metadata) enc.EncodeError(part.Err) } } if r.Version >= KafkaV2 { enc.EncodeError(r.Err) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } type ProduceReq struct { RequestHeader Compression Compression // only used when sending ProduceReqs TransactionalID string RequiredAcks int16 Timeout time.Duration Topics []ProduceReqTopic } type ProduceReqTopic struct { Name string Partitions []ProduceReqPartition } type ProduceReqPartition struct { ID int32 Messages []*Message } func ReadProduceReq(r io.Reader) (*ProduceReq, error) { var req ProduceReq dec := NewDecoder(r) decodeHeader(dec, &req) if req.version >= KafkaV3 { req.TransactionalID = dec.DecodeString() } req.RequiredAcks = dec.DecodeInt16() req.Timeout = time.Duration(dec.DecodeInt32()) * time.Millisecond len, err := dec.DecodeArrayLen() if err != nil { return nil, 
err } req.Topics = make([]ProduceReqTopic, len) for ti := range req.Topics { var topic = &req.Topics[ti] topic.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]ProduceReqPartition, len) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = dec.DecodeInt32() if dec.Err() != nil { return nil, dec.Err() } msgSetSize := dec.DecodeInt32() if dec.Err() != nil { return nil, dec.Err() } var err error if part.Messages, err = readMessageSet(r, msgSetSize); err != nil { return nil, err } } } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r ProduceReq) Kind() int16 { return ProduceReqKind } func (r *ProduceReq) Bytes() ([]byte, error) { var buf buffer enc := NewEncoder(&buf) encodeHeader(enc, r) if r.version >= KafkaV3 { enc.EncodeString(r.TransactionalID) } enc.EncodeInt16(r.RequiredAcks) enc.EncodeInt32(int32(r.Timeout / time.Millisecond)) enc.EncodeArrayLen(len(r.Topics)) for _, t := range r.Topics { enc.EncodeString(t.Name) enc.EncodeArrayLen(len(t.Partitions)) for _, p := range t.Partitions { enc.EncodeInt32(p.ID) i := len(buf) enc.EncodeInt32(0) // placeholder n, err := writeMessageSet(&buf, p.Messages, r.Compression) if err != nil { return nil, err } binary.BigEndian.PutUint32(buf[i:i+4], uint32(n)) } } if enc.Err() != nil { return nil, enc.Err() } binary.BigEndian.PutUint32(buf[0:4], uint32(len(buf)-4)) return []byte(buf), nil } func (r *ProduceReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type ProduceResp struct { Version int16 CorrelationID int32 Topics []ProduceRespTopic ThrottleTime time.Duration } type ProduceRespTopic struct { Name string Partitions []ProduceRespPartition } type ProduceRespPartition struct { ID int32 Err error Offset int64 LogAppendTime int64 } func (r *ProduceResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeError(part.Err) enc.EncodeInt64(part.Offset) if r.Version >= KafkaV2 { enc.EncodeInt64(part.LogAppendTime) } } } if r.Version >= KafkaV1 { enc.EncodeDuration(r.ThrottleTime) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func ReadProduceResp(r io.Reader) (*ProduceResp, error) { return ReadVersionedProduceResp(r, KafkaV0) } func ReadVersionedProduceResp(r io.Reader, version int16) (*ProduceResp, error) { var resp ProduceResp dec := NewDecoder(r) resp.Version = version // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]ProduceRespTopic, len) for ti := range resp.Topics { var t = &resp.Topics[ti] t.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } t.Partitions = make([]ProduceRespPartition, len) for pi := range t.Partitions { var p = &t.Partitions[pi] p.ID = dec.DecodeInt32() p.Err = errFromNo(dec.DecodeInt16()) p.Offset = dec.DecodeInt64() if resp.Version >= KafkaV2 { p.LogAppendTime = dec.DecodeInt64() } } } if resp.Version >= KafkaV1 { resp.ThrottleTime 
= dec.DecodeDuration32() } if err := dec.Err(); err != nil { return nil, err } return &resp, nil } type OffsetReq struct { RequestHeader ReplicaID int32 IsolationLevel int8 Topics []OffsetReqTopic } type OffsetReqTopic struct { Name string Partitions []OffsetReqPartition } type OffsetReqPartition struct { ID int32 TimeMs int64 // cannot be time.Time because of negative values MaxOffsets int32 // == KafkaV0 only } func ReadOffsetReq(r io.Reader) (*OffsetReq, error) { var req OffsetReq dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() // api key _ = dec.DecodeInt16() req.version = dec.DecodeInt16() req.correlationID = dec.DecodeInt32() req.ClientID = dec.DecodeString() req.ReplicaID = dec.DecodeInt32() if req.version >= KafkaV2 { req.IsolationLevel = dec.DecodeInt8() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.Topics = make([]OffsetReqTopic, len) for ti := range req.Topics { var topic = &req.Topics[ti] topic.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } topic.Partitions = make([]OffsetReqPartition, len) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = dec.DecodeInt32() part.TimeMs = dec.DecodeInt64() if req.version == KafkaV0 { part.MaxOffsets = dec.DecodeInt32() } } } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r OffsetReq) Kind() int16 { return OffsetReqKind } func (r *OffsetReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) //enc.Encode(r.ReplicaID) enc.EncodeInt32(-1) if r.version >= KafkaV2 { enc.EncodeInt8(r.IsolationLevel) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeInt64(part.TimeMs) if r.version == KafkaV0 { enc.EncodeInt32(part.MaxOffsets) } } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *OffsetReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type OffsetResp struct { Version int16 CorrelationID int32 ThrottleTime time.Duration Topics []OffsetRespTopic } type OffsetRespTopic struct { Name string Partitions []OffsetRespPartition } type OffsetRespPartition struct { ID int32 Err error TimeStamp time.Time // >= KafkaV1 only Offsets []int64 // used in KafkaV0 } func ReadOffsetResp(r io.Reader) (*OffsetResp, error) { return ReadVersionedOffsetResp(r, KafkaV0) } func ReadVersionedOffsetResp(r io.Reader, version int16) (*OffsetResp, error) { var resp OffsetResp dec := NewDecoder(r) resp.Version = version // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if version >= KafkaV2 { resp.ThrottleTime = dec.DecodeDuration32() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.Topics = make([]OffsetRespTopic, len) for ti := range resp.Topics { var t = &resp.Topics[ti] t.Name = dec.DecodeString() len, err = dec.DecodeArrayLen() if err != nil { return nil, err } t.Partitions = make([]OffsetRespPartition, len) for pi := range t.Partitions { var p = &t.Partitions[pi] p.ID = dec.DecodeInt32() p.Err = errFromNo(dec.DecodeInt16()) if version >= KafkaV1 { p.TimeStamp = time.Unix(0, dec.DecodeInt64()*int64(time.Millisecond)) // in kafka >= KafkaV1 offset can be only one number. 
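// For KafkaV0 the broker returns an array of offsets per partition, while for
// KafkaV1 and newer it returns a timestamp plus a single offset; both shapes
// end up in the same Offsets slice so callers can read the result uniformly.
// Illustrative consumption of a decoded response (resp is assumed to come
// from ReadVersionedOffsetResp):
//
//	for _, t := range resp.Topics {
//		for _, p := range t.Partitions {
//			if p.Err == nil && len(p.Offsets) > 0 {
//				_ = p.Offsets[0] // the offset the broker reported
//			}
//		}
//	}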
// But for compatibility we still use slice offset := dec.DecodeInt64() p.Offsets = []int64{offset} } else { len, err = dec.DecodeArrayLen() if err != nil { return nil, err } p.Offsets = make([]int64, len) for oi := range p.Offsets { p.Offsets[oi] = dec.DecodeInt64() } } } } if err := dec.Err(); err != nil { return nil, err } return &resp, nil } func (r *OffsetResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV2 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.Topics)) for _, topic := range r.Topics { enc.EncodeString(topic.Name) enc.EncodeArrayLen(len(topic.Partitions)) for _, part := range topic.Partitions { enc.EncodeInt32(part.ID) enc.EncodeError(part.Err) if r.Version >= KafkaV1 { enc.EncodeInt64(part.TimeStamp.UnixNano() / int64(time.Millisecond)) // in kafka >= KafkaV1 offset can be only one value. // In this case we use first element of slice var offset int64 if len(part.Offsets) > 0 { offset = part.Offsets[0] } enc.EncodeInt64(offset) } else { enc.EncodeInt64s(part.Offsets) } } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } type ReplicaAssignment struct { Partition int32 Replicas []int32 } type ConfigEntry struct { ConfigName string ConfigValue string } type TopicInfo struct { Topic string NumPartitions int32 ReplicationFactor int16 ReplicaAssignments []ReplicaAssignment ConfigEntries []ConfigEntry } type CreateTopicsReq struct { RequestHeader CreateTopicsRequests []TopicInfo Timeout time.Duration ValidateOnly bool } func ReadCreateTopicsReq(r io.Reader) (*CreateTopicsReq, error) { var req CreateTopicsReq dec := NewDecoder(r) decodeHeader(dec, &req) len, err := dec.DecodeArrayLen() if err != nil { return nil, err } req.CreateTopicsRequests = make([]TopicInfo, len) for i := range req.CreateTopicsRequests { ti := TopicInfo{} ti.Topic = dec.DecodeString() ti.NumPartitions = dec.DecodeInt32() ti.ReplicationFactor = dec.DecodeInt16() len, err := dec.DecodeArrayLen() if err != nil { return nil, err } ti.ReplicaAssignments = make([]ReplicaAssignment, len) for j := range ti.ReplicaAssignments { ra := ReplicaAssignment{} ra.Partition = dec.DecodeInt32() len, err = dec.DecodeArrayLen() ra.Replicas = make([]int32, len) for k := range ra.Replicas { ra.Replicas[k] = dec.DecodeInt32() } ti.ReplicaAssignments[j] = ra } len, err = dec.DecodeArrayLen() ti.ConfigEntries = make([]ConfigEntry, len) for l := range ti.ConfigEntries { ce := ConfigEntry{} ce.ConfigName = dec.DecodeString() ce.ConfigValue = dec.DecodeString() ti.ConfigEntries[l] = ce } req.CreateTopicsRequests[i] = ti } req.Timeout = dec.DecodeDuration32() if req.version >= KafkaV1 { req.ValidateOnly = dec.DecodeInt8() != 0 } if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r CreateTopicsReq) Kind() int16 { return CreateTopicsReqKind } func (r *CreateTopicsReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) enc.EncodeArrayLen(len(r.CreateTopicsRequests)) for _, topicInfo := range r.CreateTopicsRequests { enc.EncodeString(topicInfo.Topic) enc.EncodeInt32(topicInfo.NumPartitions) enc.EncodeInt16(topicInfo.ReplicationFactor) enc.EncodeArrayLen(len(topicInfo.ReplicaAssignments)) for _, replicaAssignment := range topicInfo.ReplicaAssignments { enc.EncodeInt32(replicaAssignment.Partition) 
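// A replica assignment is encoded as the partition ID followed by the int32
// array of broker IDs that should host its replicas. An illustrative request
// that relies on config entries instead of explicit assignments (all names
// and values are made up):
//
//	req := &CreateTopicsReq{
//		RequestHeader: RequestHeader{ClientID: "example-client"},
//		CreateTopicsRequests: []TopicInfo{{
//			Topic:             "example-topic",
//			NumPartitions:     3,
//			ReplicationFactor: 2,
//			ConfigEntries:     []ConfigEntry{{ConfigName: "retention.ms", ConfigValue: "86400000"}},
//		}},
//		Timeout: 5 * time.Second,
//	}
//	b, err := req.Bytes()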
enc.EncodeInt32s(replicaAssignment.Replicas) } enc.EncodeArrayLen(len(topicInfo.ConfigEntries)) for _, ce := range topicInfo.ConfigEntries { enc.EncodeString(ce.ConfigName) enc.EncodeString(ce.ConfigValue) } } enc.EncodeDuration(r.Timeout) if r.version >= KafkaV1 { enc.EncodeInt8(boolToInt8(r.ValidateOnly)) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *CreateTopicsReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type TopicError struct { Topic string ErrorCode int16 ErrorMessage string // >= KafkaV1 Err error } type CreateTopicsResp struct { Version int16 CorrelationID int32 TopicErrors []TopicError ThrottleTime time.Duration // >= KafkaV2 } func (r *CreateTopicsResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) if r.Version >= KafkaV2 { enc.EncodeDuration(r.ThrottleTime) } enc.EncodeArrayLen(len(r.TopicErrors)) for _, te := range r.TopicErrors { enc.EncodeString(te.Topic) enc.EncodeInt16(te.ErrorCode) if r.Version >= KafkaV1 { enc.EncodeString(te.ErrorMessage) } } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func ReadCreateTopicsResp(r io.Reader) (*CreateTopicsResp, error) { return ReadVersionedCreateTopicsResp(r, KafkaV0) } func ReadVersionedCreateTopicsResp(r io.Reader, version int16) (*CreateTopicsResp, error) { var resp CreateTopicsResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() if resp.Version >= KafkaV2 { resp.ThrottleTime = dec.DecodeDuration32() } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.TopicErrors = make([]TopicError, len) for i := range resp.TopicErrors { var te = &resp.TopicErrors[i] te.Topic = dec.DecodeString() te.ErrorCode = dec.DecodeInt16() if resp.Version >= KafkaV1 { te.ErrorMessage = dec.DecodeString() } te.Err = errFromNo(te.ErrorCode) } if dec.Err() != nil { return nil, dec.Err() } return &resp, nil } type buffer []byte func (b *buffer) Write(p []byte) (int, error) { *b = append(*b, p...) 
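// The append above is the whole implementation: buffer stays a bare []byte so
// that encoders which backfill a length placeholder (ProduceReq.Bytes and
// FetchResp.Bytes) can index straight into already-written bytes. A sketch of
// that usage (payload is hypothetical):
//
//	var buf buffer
//	buf.Write(make([]byte, 4)) // reserve the size field
//	buf.Write(payload)         // write the body
//	binary.BigEndian.PutUint32(buf[0:4], uint32(len(buf)-4))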
return len(p), nil } type APIVersionsReq struct { RequestHeader } func ReadAPIVersionsReq(r io.Reader) (*APIVersionsReq, error) { var req APIVersionsReq dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() // api key + api version _ = dec.DecodeInt32() req.correlationID = dec.DecodeInt32() req.ClientID = dec.DecodeString() if dec.Err() != nil { return nil, dec.Err() } return &req, nil } func (r APIVersionsReq) Kind() int16 { return APIVersionsReqKind } func (r *APIVersionsReq) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) encodeHeader(enc, r) if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func (r *APIVersionsReq) WriteTo(w io.Writer) (int64, error) { b, err := r.Bytes() if err != nil { return 0, err } n, err := w.Write(b) return int64(n), err } type APIVersionsResp struct { Version int16 CorrelationID int32 APIVersions []SupportedVersion ThrottleTime time.Duration } type SupportedVersion struct { APIKey int16 MinVersion int16 MaxVersion int16 } func (r *APIVersionsResp) Bytes() ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf) // message size - for now just placeholder enc.EncodeInt32(0) enc.EncodeInt32(r.CorrelationID) //error code enc.EncodeInt16(0) enc.EncodeArrayLen(len(r.APIVersions)) for _, api := range r.APIVersions { enc.EncodeInt16(api.APIKey) enc.EncodeInt16(api.MinVersion) enc.EncodeInt16(api.MaxVersion) } if r.Version >= KafkaV1 { enc.EncodeDuration(r.ThrottleTime) } if enc.Err() != nil { return nil, enc.Err() } // update the message size information b := buf.Bytes() binary.BigEndian.PutUint32(b, uint32(len(b)-4)) return b, nil } func ReadAPIVersionsResp(r io.Reader) (*APIVersionsResp, error) { return ReadVersionedAPIVersionsResp(r, KafkaV0) } func ReadVersionedAPIVersionsResp(r io.Reader, version int16) (*APIVersionsResp, error) { var resp APIVersionsResp resp.Version = version dec := NewDecoder(r) // total message size _ = dec.DecodeInt32() resp.CorrelationID = dec.DecodeInt32() errcode := dec.DecodeInt16() if errcode != 0 { //TODO fill app error return nil, fmt.Errorf("versioning error: %d", errcode) } len, err := dec.DecodeArrayLen() if err != nil { return nil, err } resp.APIVersions = make([]SupportedVersion, len) for i := range resp.APIVersions { api := &resp.APIVersions[i] api.APIKey = dec.DecodeInt16() api.MinVersion = dec.DecodeInt16() api.MaxVersion = dec.DecodeInt16() } if version >= KafkaV1 { resp.ThrottleTime = dec.DecodeDuration32() } if dec.Err() != nil { return nil, dec.Err() } return &resp, nil } kafka-2.1.1/v2/proto/messages_go_1.7_test.go000066400000000000000000000053641356004474300205470ustar00rootroot00000000000000// +build !go1.8 package proto import ( "bytes" "fmt" "reflect" "testing" "time" ) func TestProduceRequest(t *testing.T) { req := &ProduceReq{ CorrelationID: 241, ClientID: "test", RequiredAcks: RequiredAcksAll, Timeout: time.Second, Topics: []ProduceReqTopic{ { Name: "foo", Partitions: []ProduceReqPartition{ { ID: 0, Messages: []*Message{ { Offset: 0, Crc: 3099221847, Key: []byte("foo"), Value: []byte("bar"), }, }, }, }, }, }, } tests := []struct { Compression Compression Expected []byte }{ { CompressionNone, []byte{0x0, 0x0, 0x0, 0x49, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72}, }, { CompressionGzip, []byte{0x0, 0x0, 0x0, 0x6d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x44, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38, 0x9d, 0x81, 0x74, 0xc4, 0x0, 0x1, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x2a, 0x1f, 0x8b, 0x8, 0x0, 0x0, 0x9, 0x6e, 0x88, 0x0, 0xff, 0x62, 0x40, 0x0, 0x91, 0x1d, 0xbb, 0xe2, 0xc3, 0xc1, 0x2c, 0xe6, 0xb4, 0xfc, 0x7c, 0x10, 0x95, 0x94, 0x58, 0x4, 0x8, 0x0, 0x0, 0xff, 0xff, 0xa0, 0xbc, 0x10, 0xc2, 0x20, 0x0, 0x0, 0x0}, }, { CompressionSnappy, []byte{0x0, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x33, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x27, 0x2e, 0xd4, 0xed, 0xcd, 0x0, 0x2, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x19, 0x20, 0x0, 0x0, 0x19, 0x1, 0x10, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x5, 0xf, 0x28, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72}, }, } for _, tt := range tests { req.Compression = tt.Compression testRequestSerialization(t, req) b, _ := req.Bytes() if !bytes.Equal(b, tt.Expected) { fmt.Printf("%#v\n", tt.Expected) fmt.Printf("%#v\n", b) t.Fatalf("expected different bytes representation: %#v", b) } r, _ := ReadProduceReq(bytes.NewBuffer(tt.Expected)) req.Compression = CompressionNone // isn't set on deserialization if !reflect.DeepEqual(r, req) { t.Fatalf("malformed request: %#v", r) } } } kafka-2.1.1/v2/proto/messages_go_1.8_test.go000066400000000000000000000054101356004474300205400ustar00rootroot00000000000000// +build go1.8 package proto import ( "bytes" "fmt" "reflect" "testing" "time" ) func TestProduceRequest(t *testing.T) { req := &ProduceReq{ RequestHeader: RequestHeader{correlationID: 241, ClientID: "test"}, RequiredAcks: RequiredAcksAll, Timeout: time.Second, Topics: []ProduceReqTopic{ { Name: "foo", Partitions: []ProduceReqPartition{ { ID: 0, Messages: []*Message{ { Offset: 0, Crc: 3099221847, Key: []byte("foo"), Value: []byte("bar"), }, }, }, }, }, }, } tests := []struct { Compression Compression Expected []byte }{ { CompressionNone, []byte{0x0, 0x0, 0x0, 0x49, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72}, }, { CompressionGzip, []byte{0x0, 0x0, 0x0, 0x6d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x44, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38, 0x8a, 0xa7, 0x46, 0xe2, 0x0, 0x1, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x2a, 0x1f, 0x8b, 0x8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x62, 0x40, 0x0, 0x91, 0x1d, 0xbb, 0xe2, 0xc3, 0xc1, 0x2c, 0xe6, 0xb4, 0xfc, 0x7c, 0x10, 0x95, 0x94, 0x58, 0x4, 0x8, 0x0, 0x0, 0xff, 0xff, 0xa0, 0xbc, 0x10, 0xc2, 0x20, 0x0, 0x0, 0x0}, }, { 
CompressionSnappy, []byte{0x0, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x33, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x27, 0x2e, 0xd4, 0xed, 0xcd, 0x0, 0x2, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x19, 0x20, 0x0, 0x0, 0x19, 0x1, 0x10, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x5, 0xf, 0x28, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72}, }, } for _, tt := range tests { req.Compression = tt.Compression testRequestSerialization(t, req) b, _ := req.Bytes() if !bytes.Equal(b, tt.Expected) { fmt.Printf("%#v\n", tt.Expected) fmt.Printf("%#v\n", b) t.Fatalf("expected different bytes representation: %#v", b) } r, _ := ReadProduceReq(bytes.NewBuffer(tt.Expected)) req.Compression = CompressionNone // isn't set on deserialization if !reflect.DeepEqual(r, req) { t.Fatalf("malformed request: %#v", r) } } } kafka-2.1.1/v2/proto/messages_test.go000066400000000000000000001706731356004474300175030ustar00rootroot00000000000000package proto import ( "bytes" "fmt" "reflect" "testing" "time" ) func testRequestSerialization(t *testing.T, r Request) { var buf bytes.Buffer if n, err := r.WriteTo(&buf); err != nil { t.Fatalf("could not write request to buffer: %s", err) } else if n != int64(buf.Len()) { t.Fatalf("writer returned invalid number of bytes written %d != %d", n, buf.Len()) } b, err := r.Bytes() if err != nil { t.Fatalf("could not convert request to bytes: %s", err) } if !bytes.Equal(b, buf.Bytes()) { t.Fatal("Bytes() and WriteTo() serialized request is of different form") } } func TestMetadataRequest(t *testing.T) { req1 := &MetadataReq{ RequestHeader: RequestHeader{correlationID: 123, ClientID: "testcli", version: KafkaV0}, Topics: nil, } testRequestSerialization(t, req1) b, _ := req1.Bytes() expected := []byte{0x0, 0x0, 0x0, 0x15, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x7, 0x74, 0x65, 0x73, 0x74, 0x63, 0x6c, 0x69, 0x0, 0x0, 0x0, 0x0} if !bytes.Equal(b, expected) { t.Fatalf("expected different bytes representation: %v", b) } req2 := &MetadataReq{ RequestHeader: RequestHeader{correlationID: 123, ClientID: "testcli"}, Topics: []string{"foo", "bar"}, } testRequestSerialization(t, req2) b, _ = req2.Bytes() expected = []byte{0x0, 0x0, 0x0, 0x1f, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x7, 0x74, 0x65, 0x73, 0x74, 0x63, 0x6c, 0x69, 0x0, 0x0, 0x0, 0x2, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x3, 0x62, 0x61, 0x72} if !bytes.Equal(b, expected) { t.Fatalf("expected different bytes representation: %v", b) } r, _ := ReadMetadataReq(bytes.NewBuffer(expected)) if !reflect.DeepEqual(r, req2) { t.Fatalf("malformed request: %#v", r) } req3 := &MetadataReq{ RequestHeader: RequestHeader{correlationID: 123, ClientID: "testcli", version: KafkaV4}, Topics: nil, AllowAutoTopicCreation: true, } testRequestSerialization(t, req3) b3, _ := req3.Bytes() expected3 := []byte{0x0, 0x0, 0x0, 0x16, 0x0, 0x3, 0x0, 0x4, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x7, 0x74, 0x65, 0x73, 0x74, 0x63, 0x6c, 0x69, 0xFF, 0xFF, 0xFF, 0xFF, 0x1} if !bytes.Equal(b3, expected3) { t.Fatalf("expected different bytes representation: %v ( expected %v)", b3, expected3) } } func TestMetadataResponse(t *testing.T) { msgb := []byte{0x0, 0x0, 0x1, 0xc7, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0xc0, 0x10, 0x0, 0xb, 0x31, 0x37, 0x32, 0x2e, 0x31, 0x37, 0x2e, 0x34, 0x32, 0x2e, 0x31, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x12, 0x0, 0xb, 0x31, 0x37, 0x32, 
0x2e, 0x31, 0x37, 0x2e, 0x34, 0x32, 0x2e, 0x31, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x11, 0x0, 0xb, 0x31, 0x37, 0x32, 0x2e, 0x31, 0x37, 0x2e, 0x34, 0x32, 0x2e, 0x31, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x13, 0x0, 0xb, 0x31, 0x37, 0x32, 0x2e, 0x31, 0x37, 0x2e, 0x34, 0x32, 0x2e, 0x31, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0xc0, 0x13, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0xc0, 0x10, 0x0, 0x0, 0xc0, 0x11, 0x0, 0x0, 0xc0, 0x12} resp, err := ReadVersionedMetadataResp(bytes.NewBuffer(msgb), 0) if err != nil { t.Fatalf("could not read metadata response: %s", err) } expected := &MetadataResp{ CorrelationID: 123, Brokers: []MetadataRespBroker{ {NodeID: 49168, Host: "172.17.42.1", Port: 49168}, {NodeID: 49170, Host: "172.17.42.1", Port: 49170}, {NodeID: 49169, Host: "172.17.42.1", Port: 49169}, {NodeID: 49171, Host: "172.17.42.1", Port: 49171}, }, Topics: []MetadataRespTopic{ { Name: "foo", Err: error(nil), Partitions: []MetadataRespPartition{ {Err: error(nil), ID: 2, Leader: 49171, Replicas: []int32{49171, 49168, 49169}, Isrs: []int32{49171, 49168, 49169}}, {Err: error(nil), ID: 5, Leader: 49170, Replicas: []int32{49170, 49168, 49169}, Isrs: []int32{49170, 49168, 49169}}, {Err: error(nil), ID: 4, Leader: 49169, Replicas: []int32{49169, 49171, 49168}, Isrs: []int32{49169, 49171, 49168}}, {Err: error(nil), ID: 1, Leader: 49170, Replicas: []int32{49170, 49171, 49168}, Isrs: []int32{49170, 49171, 49168}}, {Err: error(nil), ID: 3, Leader: 49168, Replicas: []int32{49168, 49169, 49170}, Isrs: []int32{49168, 49169, 49170}}, {Err: error(nil), ID: 0, Leader: 49169, Replicas: []int32{49169, 49170, 49171}, Isrs: []int32{49169, 49170, 49171}}, }, }, { Name: "test", Err: error(nil), Partitions: []MetadataRespPartition{ {Err: 
error(nil), ID: 1, Leader: 49169, Replicas: []int32{49169, 49170, 49171}, Isrs: []int32{49169, 49170, 49171}}, {Err: error(nil), ID: 0, Leader: 49168, Replicas: []int32{49168, 49169, 49170}, Isrs: []int32{49168, 49169, 49170}}, }, }, }, } if !reflect.DeepEqual(resp, expected) { t.Fatalf("expected different message: %#v", resp) } if b, err := resp.Bytes(); err != nil { t.Fatalf("cannot serialize response: %s", err) } else { if !bytes.Equal(b, msgb) { t.Fatalf("serialized representation different from expected: %#v", b) } } } func TestAPIVersionsResponse(t *testing.T) { respOrig := &APIVersionsResp{ CorrelationID: 1, APIVersions: []SupportedVersion{ SupportedVersion{ APIKey: 1, MinVersion: 0, MaxVersion: 2, }, }, } b, err := respOrig.Bytes() if err != nil { t.Fatal(err) } resp, err := ReadVersionedAPIVersionsResp(bytes.NewBuffer(b), respOrig.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respOrig, resp) { t.Errorf("Should be equal %+v %+v", respOrig, resp) } } func TestMetadataResponseVersions(t *testing.T) { expectedV1 := MetadataResp{ Version: 1, CorrelationID: 123, ControllerID: 5, Brokers: []MetadataRespBroker{ {NodeID: 49168, Host: "172.17.42.1", Port: 49168, Rack: "rack1"}, {NodeID: 49170, Host: "172.17.42.1", Port: 49170, Rack: "rack1"}, {NodeID: 49169, Host: "172.17.42.1", Port: 49169, Rack: "rack1"}, {NodeID: 49171, Host: "172.17.42.1", Port: 49171, Rack: "rack1"}, }, Topics: []MetadataRespTopic{ { Name: "foo", Err: error(nil), Partitions: []MetadataRespPartition{ {Err: error(nil), ID: 2, Leader: 49171, Replicas: []int32{49171, 49168, 49169}, Isrs: []int32{49171, 49168, 49169}}, {Err: error(nil), ID: 5, Leader: 49170, Replicas: []int32{49170, 49168, 49169}, Isrs: []int32{49170, 49168, 49169}}, {Err: error(nil), ID: 4, Leader: 49169, Replicas: []int32{49169, 49171, 49168}, Isrs: []int32{49169, 49171, 49168}}, {Err: error(nil), ID: 1, Leader: 49170, Replicas: []int32{49170, 49171, 49168}, Isrs: []int32{49170, 49171, 49168}}, {Err: error(nil), ID: 3, Leader: 49168, Replicas: []int32{49168, 49169, 49170}, Isrs: []int32{49168, 49169, 49170}}, {Err: error(nil), ID: 0, Leader: 49169, Replicas: []int32{49169, 49170, 49171}, Isrs: []int32{49169, 49170, 49171}}, }, IsInternal: true, }, { Name: "test", Err: error(nil), Partitions: []MetadataRespPartition{ {Err: error(nil), ID: 1, Leader: 49169, Replicas: []int32{49169, 49170, 49171}, Isrs: []int32{49169, 49170, 49171}}, {Err: error(nil), ID: 0, Leader: 49168, Replicas: []int32{49168, 49169, 49170}, Isrs: []int32{49168, 49169, 49170}}, }, }, }, } b, err := expectedV1.Bytes() if err != nil { t.Fatal(err) } resp, err := ReadVersionedMetadataResp(bytes.NewBuffer(b), expectedV1.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(&expectedV1, resp) { t.Fatalf("Different response expectedV1 = %+v, got = %+v", expectedV1, resp) } expectedV2 := expectedV1 expectedV2.Version = 2 expectedV2.ClusterID = "cluster id" b, err = expectedV2.Bytes() if err != nil { t.Fatal(err) } resp2, err := ReadVersionedMetadataResp(bytes.NewBuffer(b), expectedV2.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(&expectedV2, resp2) { t.Fatalf("Different response expectedV2 = %+v, got = %+v", expectedV2, resp2) } expectedV3 := expectedV2 expectedV3.Version = 3 expectedV3.ThrottleTime = time.Second b, err = expectedV3.Bytes() if err != nil { t.Fatal(err) } resp3, err := ReadVersionedMetadataResp(bytes.NewBuffer(b), expectedV3.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(&expectedV3, resp3) { t.Fatalf("Different response 
expectedV3 = %+v, got = %+v", expectedV3, resp3) } } func TestProduceResponse(t *testing.T) { msgb1 := []byte{0x0, 0x0, 0x0, 0x22, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x6, 0x66, 0x72, 0x75, 0x69, 0x74, 0x73, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5d, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} resp1, err := ReadVersionedProduceResp(bytes.NewBuffer(msgb1), KafkaV0) if err != nil { t.Fatalf("could not read metadata response: %s", err) } expected1 := &ProduceResp{ CorrelationID: 241, Topics: []ProduceRespTopic{ { Name: "fruits", Partitions: []ProduceRespPartition{ { ID: 93, Err: ErrUnknownTopicOrPartition, Offset: -1, }, }, }, }, } if !reflect.DeepEqual(resp1, expected1) { t.Fatalf("expected different message: %#v", resp1) } if b, err := resp1.Bytes(); err != nil { t.Fatalf("cannot serialize response: %s", err) } else { if !bytes.Equal(b, msgb1) { t.Fatalf("serialized representation different from expected: %#v", b) } } msgb2 := []byte{0x0, 0x0, 0x0, 0x1f, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} resp2, err := ReadVersionedProduceResp(bytes.NewBuffer(msgb2), KafkaV0) if err != nil { t.Fatalf("could not read metadata response: %s", err) } expected2 := &ProduceResp{ CorrelationID: 241, Topics: []ProduceRespTopic{ { Name: "foo", Partitions: []ProduceRespPartition{ { ID: 0, Err: error(nil), Offset: 1, }, }, }, }, } if !reflect.DeepEqual(resp2, expected2) { t.Fatalf("expected different message: %#v", resp2) } if b, err := resp2.Bytes(); err != nil { t.Fatalf("cannot serialize response: %s", err) } else { if !bytes.Equal(b, msgb2) { t.Fatalf("serialized representation different from expected: %#v", b) } } } func TestProduceResponseWithVersions(t *testing.T) { produceRespV1 := ProduceResp{ Version: 1, CorrelationID: 0, Topics: []ProduceRespTopic{ ProduceRespTopic{ Name: "", Partitions: []ProduceRespPartition{ ProduceRespPartition{ ID: 0, Err: nil, Offset: 0, LogAppendTime: 0, }, }, }, }, ThrottleTime: time.Second, } b, err := produceRespV1.Bytes() if err != nil { t.Fatal(err) } resp, err := ReadVersionedProduceResp(bytes.NewBuffer(b), produceRespV1.Version) if err != nil { t.Fatal(err) } //assert.Equal(t, produceRespV1, *resp, "Not equal") if !reflect.DeepEqual(produceRespV1, *resp) { t.Errorf("Not equal") } produceRespV2 := produceRespV1 produceRespV2.Version = KafkaV2 produceRespV2.Topics[0].Partitions[0].LogAppendTime = 5 b, err = produceRespV2.Bytes() if err != nil { t.Fatal(err) } resp, err = ReadVersionedProduceResp(bytes.NewBuffer(b), produceRespV2.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(produceRespV2, *resp) { t.Errorf("Not equal") } } func TestFetchRequest(t *testing.T) { req := &FetchReq{ RequestHeader: RequestHeader{correlationID: 241, ClientID: "test"}, ReplicaID: -1, MaxWaitTime: time.Second * 2, MinBytes: 12454, Topics: []FetchReqTopic{ { Name: "foo", Partitions: []FetchReqPartition{ {ID: 421, FetchOffset: 529, MaxBytes: 4921}, {ID: 0, FetchOffset: 11, MaxBytes: 92}, }, }, }, } testRequestSerialization(t, req) b, _ := req.Bytes() expected := []byte{0x0, 0x0, 0x0, 0x47, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x7, 0xd0, 0x0, 0x0, 0x30, 0xa6, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x1, 0xa5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x11, 0x0, 0x0, 0x13, 0x39, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb, 0x0, 0x0, 
0x0, 0x5c} if !bytes.Equal(b, expected) { t.Fatalf("expected different bytes representation: %#v", b) } r, _ := ReadFetchReq(bytes.NewBuffer(expected)) if !reflect.DeepEqual(r, req) { t.Fatalf("malformed request: %#v", r) } } func TestFetchResponse(t *testing.T) { expected1 := &FetchResp{ CorrelationID: 241, Topics: []FetchRespTopic{ { Name: "foo", Partitions: []FetchRespPartition{ { ID: 0, Err: error(nil), TipOffset: 4, Messages: []*Message{ {Offset: 2, Crc: 0xb8ba5f57, Key: []byte("foo"), Value: []byte("bar"), Topic: "foo", Partition: 0, TipOffset: 4}, {Offset: 3, Crc: 0xb8ba5f57, Key: []byte("foo"), Value: []byte("bar"), Topic: "foo", Partition: 0, TipOffset: 4}, }, }, { ID: 1, Err: ErrUnknownTopicOrPartition, TipOffset: -1, Messages: nil, }, }, }, }, } tests := []struct { Bytes []byte RoundTrip bool // whether to compare re-serialized version Expected *FetchResp }{ { // CompressionNone Bytes: []byte{0x0, 0x0, 0x0, 0x75, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}, RoundTrip: true, Expected: expected1, }, { // CompressionGzip Bytes: []byte{0x0, 0x0, 0x0, 0x81, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x4c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x40, 0x7, 0x3c, 0x17, 0x35, 0x0, 0x1, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x32, 0x1f, 0x8b, 0x8, 0x0, 0x0, 0x9, 0x6e, 0x88, 0x0, 0xff, 0x62, 0x80, 0x0, 0x26, 0x20, 0x16, 0xd9, 0xb1, 0x2b, 0x3e, 0x1c, 0xcc, 0x63, 0x4e, 0xcb, 0xcf, 0x7, 0x51, 0x49, 0x89, 0x45, 0x50, 0x79, 0x66, 0x5c, 0xf2, 0x80, 0x0, 0x0, 0x0, 0xff, 0xff, 0xab, 0xcc, 0x83, 0x80, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}, RoundTrip: false, Expected: expected1, }, { // CompressionSnappy Bytes: []byte{0x0, 0x0, 0x0, 0x75, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x34, 0x6, 0x8d, 0xfe, 0xe2, 0x0, 0x2, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x26, 0x40, 0x0, 0x0, 0x9, 0x1, 0x20, 0x2, 0x0, 0x0, 0x0, 0x14, 0xb8, 0xba, 0x5f, 0x57, 0x5, 0xf, 0x28, 0x3, 0x66, 0x6f, 0x6f, 0x0, 0x0, 0x0, 0x3, 0x62, 0x61, 0x72, 0x5, 0x10, 0x8, 0x0, 0x0, 0x3, 0x5e, 0x20, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}, RoundTrip: false, Expected: expected1, }, { Bytes: []byte{0x0, 0x0, 0x0, 0x48, 0x0, 0x0, 0x0, 0xf1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x0, 0x3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}, 
RoundTrip: true, Expected: &FetchResp{ CorrelationID: 241, Topics: []FetchRespTopic{ { Name: "test", Partitions: []FetchRespPartition{ { ID: 0, Err: ErrUnknownTopicOrPartition, TipOffset: -1, Messages: nil, }, { ID: 1, Err: ErrUnknownTopicOrPartition, TipOffset: -1, Messages: nil, }, { ID: 8, Err: ErrUnknownTopicOrPartition, TipOffset: -1, Messages: nil, }, }, }, }, }, }, } for _, tt := range tests { resp, err := ReadVersionedFetchResp(bytes.NewBuffer(tt.Bytes), KafkaV0) if err != nil { t.Fatalf("could not read fetch response: %s", err) } if !reflect.DeepEqual(resp, tt.Expected) { t.Fatalf("expected different message: %#v", resp) } if tt.RoundTrip { b, err := resp.Bytes() if err != nil { t.Fatalf("cannot serialize response: %s", err) } if !bytes.Equal(b, tt.Bytes) { t.Fatalf("serialized representation different from expected: %#v", b) } } } } func TestOffsetFetchWithVersions(t *testing.T) { respV0 := OffsetFetchResp{ Version: 0, CorrelationID: 0, ThrottleTime: 0, Topics: []OffsetFetchRespTopic{ OffsetFetchRespTopic{ Name: "", Partitions: []OffsetFetchRespPartition{ OffsetFetchRespPartition{ ID: 0, Offset: 0, Metadata: "", Err: nil, }, }, }, }, Err: nil, } b0, err := respV0.Bytes() if err != nil { t.Fatal(err) } r0, err := ReadVersionedOffsetFetchResp(bytes.NewReader(b0), respV0.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV0, *r0) { t.Errorf("Expected \n %#+v\n fot \n %#+v\n", respV0, *r0) } respV2 := respV0 respV2.Version = KafkaV2 respV2.Err = errnoToErr[-1] b2, err := respV2.Bytes() if err != nil { t.Fatal(err) } r2, err := ReadVersionedOffsetFetchResp(bytes.NewReader(b2), respV2.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV2, *r2) { t.Errorf("Expected \n %#+v\n fot \n %#+v\n", respV2, *r2) } respV3 := respV2 respV3.Version = KafkaV3 respV3.ThrottleTime = 10 * time.Second b3, err := respV3.Bytes() if err != nil { t.Fatal(err) } r3, err := ReadVersionedOffsetFetchResp(bytes.NewReader(b3), respV3.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV3, *r3) { t.Errorf("Expected \n %#+v\n fot \n %#+v\n", respV3, *r3) } } func TestFetchResponseWithVersions(t *testing.T) { // Test version 0 fetchRespV0 := FetchResp{ Version: KafkaV0, CorrelationID: 1, Topics: []FetchRespTopic{ FetchRespTopic{ Name: "Topic1", Partitions: []FetchRespPartition{ FetchRespPartition{ ID: 1, Err: nil, TipOffset: 1, Messages: nil, }, }, }, }, } b0, err := fetchRespV0.Bytes() if err != nil { t.Fatal(err) } resp0, err := ReadVersionedFetchResp(bytes.NewReader(b0), fetchRespV0.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(&fetchRespV0, resp0) { t.Fatalf("Not equal %+#v , %+#v", fetchRespV0, resp0) } // Test version 1 fetchRespV1 := fetchRespV0 fetchRespV1.Version = KafkaV1 fetchRespV1.ThrottleTime = time.Second b1, err := fetchRespV1.Bytes() if err != nil { t.Fatal(err) } resp1, err := ReadVersionedFetchResp(bytes.NewBuffer(b1), fetchRespV1.Version) if !reflect.DeepEqual(&fetchRespV1, resp1) { t.Fatalf("Not equal %+#v , %+#v", fetchRespV1, resp1) } // Test version 4 fetchRespV4 := fetchRespV1 fetchRespV4.Version = KafkaV4 fetchRespV4.Topics[0].Partitions[0].LastStableOffset = 1 fetchRespV4.Topics[0].Partitions[0].AbortedTransactions = []FetchRespAbortedTransaction{ FetchRespAbortedTransaction{ ProducerID: 1, FirstOffset: 1, }, } b4, err := fetchRespV4.Bytes() if err != nil { t.Fatal(err) } resp4, err := ReadVersionedFetchResp(bytes.NewBuffer(b4), fetchRespV4.Version) if !reflect.DeepEqual(&fetchRespV4, resp4) { t.Fatalf("Not equal %+#v , %+#v", 
fetchRespV4, resp4) } // Test version 5 fetchRespV5 := fetchRespV4 fetchRespV5.Version = KafkaV5 fetchRespV5.Topics[0].Partitions[0].LogStartOffset = 1 b5, err := fetchRespV5.Bytes() if err != nil { t.Fatal(err) } resp5, err := ReadVersionedFetchResp(bytes.NewBuffer(b5), fetchRespV5.Version) if !reflect.DeepEqual(&fetchRespV5, resp5) { t.Fatalf("Not equal %+#v , %+#v", fetchRespV5, resp5) } } func TestFetchResponseWithRecordBatchAndGZIP(t *testing.T) { data := []byte{ 0x00, 0x00, 0x00, 0x99, // Size 0x00, 0x00, 0x00, 0x04, // CorrelationID 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, 0x00, 0x01, // Number of topics 0x00, 0x06, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x34, // "topic4" 0x00, 0x00, 0x00, 0x01, // Number of partition 0x00, 0x00, 0x00, 0x00, // Partition id 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // High watermark offset 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Last stable offset 0xff, 0xff, 0xff, 0xff, // Numbet of aborted transactions // Record Batch 0x00, 0x00, 0x00, 0x63, // Size 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // First offset 0x00, 0x00, 0x00, 0x57, // Length 0x00, 0x00, 0x00, 0x00, // Partition leader epoch 0x02, // Magic byte (version of message) 0x04, 0xf7, 0xab, 0xb5, // CRC 0x00, 0x01, // Attributes 0x00, 0x00, 0x00, 0x00, // Last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // First timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Producer ID 0xff, 0xff, // Producer epoch 0xff, 0xff, 0xff, 0xff, // First Sequence 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x62, 0x60, 0x60, 0x60, 0x14, 0xcb, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0x61, 0x00, 0x00, 0xd3, 0x90, 0x6c, 0x82, 0x12, 0x00, 0x00, 0x00, } resp, err := ReadVersionedFetchResp(bytes.NewReader(data), 4) if err != nil { t.Fatal(err) } if string(resp.Topics[0].Partitions[0].RecordBatches[0].Records[0].Value) != "hello world" { t.Fatal("Wrong response") } } func TestFetchResponseWithRecordBatch(t *testing.T) { oneMessageFetchResponseV4error := []byte{ 0x00, 0x00, 0x00, 0x00, // Fake size (just random number) 0x00, 0x00, 0x00, 0x05, //CorrelationID 0x00, 0x00, 0x00, 0x00, //ThrottleTime 0x00, 0x00, 0x00, 0x01, //Number of topics 0x00, 0x06, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x34, // 'topic4' 0x00, 0x00, 0x00, 0x01, // number of Partition 0x00, 0x00, 0x00, 0x00, // partition id 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // High watermark Offset 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Last stable offset 0xff, 0xff, 0xff, 0xff, // Number of aborted Transactions //RecordBatch 0x00, 0x00, 0x00, 0x4f, // Size ??? 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FirstOffset 0x00, 0x00, 0x00, 0x43, // Length 0x00, 0x00, 0x00, 0x00, // PartitionLeaderEpoch 0x02, // Magic 0x34, 0xa1, 0x4e, 0x1d, // CRC 0x00, 0x00, //Attributes 0x00, 0x00, 0x00, 0x00, // LastOffsetDelte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //FirstTimestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // ProducerId 0xff, 0xff, //ProducerEpoch 0xff, 0xff, 0xff, 0xff, // FirstSequence //Record 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x22, 0x00, 0x00, 0x00, 0x01, 0x16, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x00, } resp, err := ReadVersionedFetchResp(bytes.NewReader(oneMessageFetchResponseV4error), 4) if err != nil { t.Fatal(err) } if string(resp.Topics[0].Partitions[0].RecordBatches[0].Records[0].Value) != "hello world" { t.Fatal("Wrong response") } } func TestFetchResponseWithRecordBatch2(t *testing.T) { oneMessageFetchResponseV4error := []byte{ 0x00, 0x00, 0x00, 0x85, // Size 0x00, 0x00, 0x00, 0x0a, // CorrelationID 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, 0x00, 0x01, // Number of topics 0x00, 0x06, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x33, // "topic3" 0x00, 0x00, 0x00, 0x01, // number of partitions 0x00, 0x00, 0x00, 0x00, // Partions id 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // High Watermark offset 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Last stable offset 0xff, 0xff, 0xff, 0xff, // Number of aborted transactions // Record Batch 0x00, 0x00, 0x00, 0x4f, // Size 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // First Offset 0x00, 0x00, 0x00, 0x43, // Length 0x00, 0x00, 0x00, 0x00, // Partition Leader Epoch 0x02, // Magic byte (version) 0x34, 0xa1, 0x4e, 0x1d, // CRC 0x00, 0x00, // Attributes 0x00, 0x00, 0x00, 0x00, // Last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // FirstTimespamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Producer ID 0xff, 0xff, // Producer Epoch 0xff, 0xff, 0xff, 0xff, // first sequence // Record 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x22, 0x00, 0x00, 0x00, 0x01, 0x16, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x00, } resp, err := ReadVersionedFetchResp(bytes.NewReader(oneMessageFetchResponseV4error), 4) if err != nil { t.Fatal(err) } if string(resp.Topics[0].Partitions[0].RecordBatches[0].Records[0].Value) != "hello world" { t.Fatal("Wrong response") } } func TestFetchResponseWithRecordBatchWithMultipleRecords(t *testing.T) { messageFetchResponseV5MultipleRecords := []byte{ 0, 0, 1, 205, //size 0, 0, 0, 63, //correlation, id 0, 0, 0, 0, //throttle, time 0, 0, 0, 1, //number, of, topics 0, 4, 97, 117, 116, 104, // name of the topic ( auth) 0, 0, 0, 1, //number of partitions 0, 0, 0, 0, // partition id 0, 0, // error 0, 0, 0, 0, 0, 0, 1, 121, //, hight, water, martk 255, 255, 255, 255, 255, 255, 255, 255, //, last, stable, offset 0, 0, 0, 0, 0, 0, 0, 0, //, log, start, offset 255, 255, 255, 255, //, , 0, 0, 1, 145, //, size, of, batch 0, 0, 0, 0, 0, 0, 1, 72, //, first, offset 0, 0, 1, 133, //, length 0, 0, 0, 2, //, partition, leader, epoch 2, //, magic, 7, 177, 219, 215, //crc, 0, 0, //, attr 0, 0, 0, 1, //, last, offset, delta 255, 255, 255, 255, 255, 255, 255, 255, //first, timestampt 255, 255, 255, 255, 255, 255, 255, 255, //, max, timestampt 255, 255, 255, 255, 255, 255, 255, 255, //, producer, id 255, 255, //, producer, epoch 255, 255, 255, 255, //, base, sequence 0, 0, 0, 2, //, number, of, records 212, 2, 
//size, 0, //, attribute 0, //, timestampt, delta 0, //, offset, delta 28, //, key, length 8, 128, 254, 7, 16, 131, 214, 136, 178, 165, 138, 220, 153, 2, 170, 2, 1, 1, 16, 17, 2, 32, 246, 240, 230, 136, 163, 28, //, key 67, //, 50, 175, 30, 87, 26, 228, 189, 143, 130, 80, 116, 194, 56, 70, 130, 136, 212, 23, 149, 222, 38, 125, 51, 192, 151, 107, 0, 0, 0, 10, 52, 10, 24, 102, 113, 109, 111, 106, 116, 50, 97, 113, 108, 119, 121, 101, 51, 118, 103, 116, 118, 114, 118, 105, 108, 119, 112, 24, 180, 149, 247, 197, 241, 159, 161, 188, 21, 32, 180, 249, 142, 150, 132, 160, 161, 188, 21, 42, 4, 104, 116, 116, 112, 18, 32, 246, 240, 230, 136, 163, 28, 67, 50, 175, 30, 87, 26, 228, 189, 143, 130, 80, 116, 194, 56, 70, 130, 136, 212, 23, 149, 222, 38, 125, 51, 192, 151, 26, 17, 111, 114, 103, 97, 110, 105, 122, 97, 116, 105, 111, 110, 58, 114, 101, 97, 100, 0, 204, 2, 0, 0, 2, 28, 8, 128, 254, 7, 16, 132, 214, 136, 178, 165, 138, 220, 153, 2, 162, 2, 1, 1, 16, 17, 2, 32, 246, 240, 230, 136, 163, 28, 67, 50, 175, 30, 87, 26, 228, 189, 143, 130, 80, 116, 194, 56, 70, 130, 136, 212, 23, 149, 222, 38, 125, 51, 192, 151, 103, 0, 0, 0, 10, 52, 10, 24, 119, 51, 120, 104, 55, 107, 118, 104, 104, 118, 112, 55, 104, 113, 111, 103, 119, 97, 108, 53, 55, 102, 100, 102, 24, 229, 183, 247, 197, 241, 159, 161, 188, 21, 32, 229, 155, 143, 150, 132, 160, 161, 188, 21, 42, 4, 104, 116, 116, 112, 18, 32, 246, 240, 230, 136, 163, 28, 67, 50, 175, 30, 87, 26, 228, 189, 143, 130, 80, 116, 194, 56, 70, 130, 136, 212, 23, 149, 222, 38, 125, 51, 192, 151, 26, 13, 115, 117, 112, 112, 108, 105, 101, 114, 58, 114, 101, 97, 100, 0, } resp, err := ReadVersionedFetchResp(bytes.NewReader(messageFetchResponseV5MultipleRecords), 5) if err != nil { t.Fatal(err) } records := resp.Topics[0].Partitions[0].RecordBatches[0].Records if len(records) != 2 { t.Fatal("Expect 2 records") } if records[0].Length != 170 { t.Fatal("Wrong record length") } if records[1].Length != 166 { t.Fatal("Wrong record length") } } func TestFetchResponseWithMultipleRecordBatches(t *testing.T) { messageFetchResponseV5MultipleBatches := []byte{ 0x00, 0x00, 0x02, 0x98, // size 0x00, 0x00, 0x00, 0xa7, // correlation id 0x00, 0x00, 0x00, 0x00, // throttle time 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x0c, // string length 0x6c, 0x65, 0x67, 0x61, 0x6c, 0x2d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, // topic name 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x00, 0x00, 0x00, // partition id 0x00, 0x00, // error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // tip offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // last stable offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // log start offset 0xff, 0xff, 0xff, 0xff, // array length 0x00, 0x00, 0x02, 0x54, // message set size // Batch 1: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, // first offset 0x00, 0x00, 0x01, 0x1e, // length 0x00, 0x00, 0x00, 0x04, // partition leader epoch 0x02, // message version 0xbc, 0x45, 0x46, 0x64, // crc 0x00, 0x00, // attributes 0x00, 0x00, 0x00, 0x00, // last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // first timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // max timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // producer id 0xff, 0xff, // producer epoch 0xff, 0xff, 0xff, 0xff, // first sequence 0x00, 0x00, 0x00, 0x01, // array length 0xd6, 0x03, // record length 0x00, // attributes 0x00, // timestamp delta 0x00, // offset delta 0x10, // key length 0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xf7, 0x07, // key 0xb8, 0x03, // value length 
(220) 0x01, 0x01, 0x50, 0x12, 0x04, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, // value... 0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0xba, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x0a, 0x18, 0x76, 0x6a, 0x67, 0x65, 0x72, 0x6d, 0x73, 0x34, 0x32, 0x68, 0x6b, 0x35, 0x6f, 0x63, 0x74, 0x37, 0x70, 0x66, 0x33, 0x7a, 0x79, 0x35, 0x74, 0x34, 0x12, 0x07, 0x62, 0x30, 0x34, 0x39, 0x38, 0x65, 0x34, 0x18, 0xc3, 0xf6, 0xb0, 0xc2, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x22, 0x15, 0x0a, 0x08, 0x44, 0x45, 0x41, 0x44, 0x42, 0x45, 0x45, 0x46, 0x12, 0x09, 0x68, 0x74, 0x74, 0x70, 0x2d, 0x66, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, 0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0x1a, 0x16, 0x44, 0x45, 0x38, 0x39, 0x33, 0x37, 0x30, 0x34, 0x30, 0x30, 0x34, 0x34, 0x30, 0x35, 0x33, 0x32, 0x30, 0x31, 0x33, 0x30, 0x30, 0x30, 0x20, 0x91, 0xd1, 0xb2, 0xc0, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x28, 0x91, 0xd1, 0xfa, 0xb7, 0x9a, 0x9c, 0xa6, 0x85, 0x16, 0x32, 0x12, 0x44, 0x45, 0x35, 0x39, 0x5a, 0x5a, 0x5a, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35, 0x32, 0x35, 0x34, 0x39, 0x33, 0x3a, 0x18, 0x73, 0x76, 0x7a, 0x76, 0x6c, 0x63, 0x73, 0x79, 0x71, 0x6f, 0x64, 0x76, 0x73, 0x6f, 0x61, 0x65, 0x68, 0x75, 0x6e, 0x6b, 0x69, 0x6c, 0x69, 0x6d, // ...value 0x00, // headers length // Batch 2: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, // first offset 0x00, 0x00, 0x01, 0x1e, // length 0x00, 0x00, 0x00, 0x04, // partition leader epoch 0x02, // message version 0x31, 0x04, 0x90, 0xb6, // crc 0x00, 0x00, // attributes 0x00, 0x00, 0x00, 0x00, // last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // first timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // max timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // producer id 0xff, 0xff, // producer epoch 0xff, 0xff, 0xff, 0xff, // first sequence 0x00, 0x00, 0x00, 0x01, // array length 0xd6, 0x03, // record length 0x00, // attributes 0x00, // attribute delta 0x00, // offset delta 0x10, // key length 0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xa6, 0x08, // key 0xb8, 0x03, // value length (220) 0x01, 0x01, 0x50, 0x12, 0x04, 0x18, 0x6d, 0x6c, 0x72, 0x35, 0x68, 0x65, 0x6e, 0x6d, 0x61, 0x64, // value... 
0x77, 0x64, 0x78, 0x6a, 0x73, 0x36, 0x35, 0x6f, 0x6e, 0x74, 0x6f, 0x62, 0x35, 0x77, 0xba, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x0a, 0x18, 0x36, 0x62, 0x76, 0x76, 0x64, 0x68, 0x36, 0x68, 0x66, 0x74, 0x70, 0x6e, 0x68, 0x63, 0x35, 0x35, 0x33, 0x79, 0x70, 0x67, 0x6c, 0x78, 0x72, 0x69, 0x12, 0x07, 0x62, 0x30, 0x34, 0x39, 0x38, 0x65, 0x34, 0x18, 0xfb, 0x88, 0xc4, 0xed, 0x93, 0xaa, 0x90, 0xcd, 0x15, 0x22, 0x15, 0x0a, 0x08, 0x44, 0x45, 0x41, 0x44, 0x42, 0x45, 0x45, 0x46, 0x12, 0x09, 0x68, 0x74, 0x74, 0x70, 0x2d, 0x66, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x6d, 0x6c, 0x72, 0x35, 0x68, 0x65, 0x6e, 0x6d, 0x61, 0x64, 0x77, 0x64, 0x78, 0x6a, 0x73, 0x36, 0x35, 0x6f, 0x6e, 0x74, 0x6f, 0x62, 0x35, 0x77, 0x1a, 0x16, 0x44, 0x45, 0x38, 0x39, 0x33, 0x37, 0x30, 0x34, 0x30, 0x30, 0x34, 0x34, 0x30, 0x35, 0x33, 0x32, 0x30, 0x31, 0x33, 0x30, 0x30, 0x30, 0x20, 0xeb, 0xad, 0xd3, 0xeb, 0x93, 0xaa, 0x90, 0xcd, 0x15, 0x28, 0xeb, 0xad, 0x9b, 0xe3, 0x8f, 0xb7, 0xa6, 0x85, 0x16, 0x32, 0x12, 0x44, 0x45, 0x35, 0x39, 0x5a, 0x5a, 0x5a, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35, 0x32, 0x35, 0x34, 0x39, 0x33, 0x3a, 0x18, 0x69, 0x6a, 0x71, 0x73, 0x6e, 0x33, 0x67, 0x6d, 0x67, 0x75, 0x65, 0x77, 0x67, 0x77, 0x63, 0x6d, 0x6e, 0x64, 0x73, 0x75, 0x62, 0x79, 0x68, 0x64, // ...value 0x00, // headers length } resp, err := ReadVersionedFetchResp(bytes.NewReader(messageFetchResponseV5MultipleBatches), 5) if err != nil { t.Fatal(err) } batches := resp.Topics[0].Partitions[0].RecordBatches if got, exp := len(batches), 2; got != exp { t.Fatalf("expected %d batches, got %d", exp, got) } if got, exp := len(batches[0].Records), 1; got != exp { t.Fatalf("expected %d records in batch 0, got %d", exp, got) } if got, exp := batches[0].Records[0].Key, []byte{0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xf7, 0x07}; !reflect.DeepEqual(got, exp) { t.Fatalf("expected key 0 %x, got %x", exp, got) } if got, exp := len(batches[0].Records[0].Value), 220; got != exp { t.Fatalf("expected value 0 length %d, got %d", exp, got) } if got, exp := len(batches[1].Records), 1; got != exp { t.Fatalf("expected %d records in batch 1, got %d", exp, got) } if got, exp := batches[1].Records[0].Key, []byte{0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xa6, 0x08}; !reflect.DeepEqual(got, exp) { t.Fatalf("expected key 1 %x, got %x", exp, got) } if got, exp := len(batches[1].Records[0].Value), 220; got != exp { t.Fatalf("expected value 1 length %d, got %d", exp, got) } } func TestFetchResponseWithPartialFinalBatch(t *testing.T) { headerAndBatch1 := []byte{ 0x00, 0x00, 0x02, 0x98, // size 0x00, 0x00, 0x00, 0xa7, // correlation id 0x00, 0x00, 0x00, 0x00, // throttle time 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x0c, // string length 0x6c, 0x65, 0x67, 0x61, 0x6c, 0x2d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, // topic name 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x00, 0x00, 0x00, // partition id 0x00, 0x00, // error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // tip offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // last stable offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // log start offset 0xff, 0xff, 0xff, 0xff, // array length 0x00, 0x00, 0x02, 0x54, // message set size // Batch 1: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, // first offset 0x00, 0x00, 0x01, 0x1e, // length 0x00, 0x00, 0x00, 0x04, // partition leader epoch 0x02, // message version 0xbc, 0x45, 0x46, 0x64, // crc 0x00, 0x00, // attributes 0x00, 0x00, 0x00, 0x00, // last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // first timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // max timestamp 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // producer id 0xff, 0xff, // producer epoch 0xff, 0xff, 0xff, 0xff, // first sequence 0x00, 0x00, 0x00, 0x01, // array length 0xd6, 0x03, // record length 0x00, // attributes 0x00, // timestamp delta 0x00, // offset delta 0x10, // key length 0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xf7, 0x07, // key 0xb8, 0x03, // value length (220) 0x01, 0x01, 0x50, 0x12, 0x04, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, // value... 0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0xba, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x0a, 0x18, 0x76, 0x6a, 0x67, 0x65, 0x72, 0x6d, 0x73, 0x34, 0x32, 0x68, 0x6b, 0x35, 0x6f, 0x63, 0x74, 0x37, 0x70, 0x66, 0x33, 0x7a, 0x79, 0x35, 0x74, 0x34, 0x12, 0x07, 0x62, 0x30, 0x34, 0x39, 0x38, 0x65, 0x34, 0x18, 0xc3, 0xf6, 0xb0, 0xc2, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x22, 0x15, 0x0a, 0x08, 0x44, 0x45, 0x41, 0x44, 0x42, 0x45, 0x45, 0x46, 0x12, 0x09, 0x68, 0x74, 0x74, 0x70, 0x2d, 0x66, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, 0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0x1a, 0x16, 0x44, 0x45, 0x38, 0x39, 0x33, 0x37, 0x30, 0x34, 0x30, 0x30, 0x34, 0x34, 0x30, 0x35, 0x33, 0x32, 0x30, 0x31, 0x33, 0x30, 0x30, 0x30, 0x20, 0x91, 0xd1, 0xb2, 0xc0, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x28, 0x91, 0xd1, 0xfa, 0xb7, 0x9a, 0x9c, 0xa6, 0x85, 0x16, 0x32, 0x12, 0x44, 0x45, 0x35, 0x39, 0x5a, 0x5a, 0x5a, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35, 0x32, 0x35, 0x34, 0x39, 0x33, 0x3a, 0x18, 0x73, 0x76, 0x7a, 0x76, 0x6c, 0x63, 0x73, 0x79, 0x71, 0x6f, 0x64, 0x76, 0x73, 0x6f, 0x61, 0x65, 0x68, 0x75, 0x6e, 0x6b, 0x69, 0x6c, 0x69, 0x6d, // ...value 0x00, // headers length } batch2 := []byte{ // Batch 2: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, // first offset 0x00, 0x00, 0x01, 0x1e, // length 0x00, 0x00, 0x00, 0x04, // partition leader epoch 0x02, // message version 0x31, 0x04, 0x90, 0xb6, // crc 0x00, 0x00, // attributes 0x00, 0x00, 0x00, 0x00, // last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // first timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // max timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // producer id 0xff, 0xff, // producer epoch 0xff, 0xff, 0xff, 0xff, // first sequence 0x00, 0x00, 0x00, 0x01, // array length 0xd6, 0x03, // record length 0x00, // attributes 0x00, // attribute delta 0x00, // offset delta 0x10, // key length 0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xa6, 0x08, // key 0xb8, 0x03, // value length (220) 0x01, 0x01, 0x50, 0x12, 0x04, 0x18, 0x6d, 0x6c, 0x72, 0x35, 0x68, 0x65, 0x6e, 0x6d, 0x61, 0x64, // value... 
0x77, 0x64, 0x78, 0x6a, 0x73, 0x36, 0x35, 0x6f, 0x6e, 0x74, 0x6f, 0x62, 0x35, 0x77, 0xba, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x0a, 0x18, 0x36, 0x62, 0x76, 0x76, 0x64, 0x68, 0x36, 0x68, 0x66, 0x74, 0x70, 0x6e, 0x68, 0x63, 0x35, 0x35, 0x33, 0x79, 0x70, 0x67, 0x6c, 0x78, 0x72, 0x69, 0x12, 0x07, 0x62, 0x30, 0x34, 0x39, 0x38, 0x65, 0x34, 0x18, 0xfb, 0x88, 0xc4, 0xed, 0x93, 0xaa, 0x90, 0xcd, 0x15, 0x22, 0x15, 0x0a, 0x08, 0x44, 0x45, 0x41, 0x44, 0x42, 0x45, 0x45, 0x46, 0x12, 0x09, 0x68, 0x74, 0x74, 0x70, 0x2d, 0x66, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x6d, 0x6c, 0x72, 0x35, 0x68, 0x65, 0x6e, 0x6d, 0x61, 0x64, 0x77, 0x64, 0x78, 0x6a, 0x73, 0x36, 0x35, 0x6f, 0x6e, 0x74, 0x6f, 0x62, 0x35, 0x77, 0x1a, 0x16, 0x44, 0x45, 0x38, 0x39, 0x33, 0x37, 0x30, 0x34, 0x30, 0x30, 0x34, 0x34, 0x30, 0x35, 0x33, 0x32, 0x30, 0x31, 0x33, 0x30, 0x30, 0x30, 0x20, 0xeb, 0xad, 0xd3, 0xeb, 0x93, 0xaa, 0x90, 0xcd, 0x15, 0x28, 0xeb, 0xad, 0x9b, 0xe3, 0x8f, 0xb7, 0xa6, 0x85, 0x16, 0x32, 0x12, 0x44, 0x45, 0x35, 0x39, 0x5a, 0x5a, 0x5a, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35, 0x32, 0x35, 0x34, 0x39, 0x33, 0x3a, 0x18, 0x69, 0x6a, 0x71, 0x73, 0x6e, 0x33, 0x67, 0x6d, 0x67, 0x75, 0x65, 0x77, 0x67, 0x77, 0x63, 0x6d, 0x6e, 0x64, 0x73, 0x75, 0x62, 0x79, 0x68, 0x64, // ...value 0x00, // headers length } // It's fairly easy to test all cutoff points within batch2. // NB: we're not testing an empty or complete batch2 here. for cutoff := 1; cutoff < len(batch2); cutoff++ { cutoff := cutoff t.Run(fmt.Sprintf("cutoff%d", cutoff), func(t *testing.T) { t.Parallel() input := append(headerAndBatch1, batch2[:cutoff]...) resp, err := ReadVersionedFetchResp(bytes.NewReader(input), 5) if err != nil { t.Fatal(err) } batches := resp.Topics[0].Partitions[0].RecordBatches if got, exp := len(batches), 1; got != exp { t.Fatalf("expected %d batches, got %d", exp, got) } if got, exp := len(batches[0].Records), 1; got != exp { t.Fatalf("expected %d records in batch 0, got %d", exp, got) } got, exp := batches[0].Records[0].Key, []byte{0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xf7, 0x07} if !reflect.DeepEqual(got, exp) { t.Fatalf("expected key 0 %x, got %x", exp, got) } }) } } func TestConsumerMetadataWithVersions(t *testing.T) { respV0 := ConsumerMetadataResp{ Version: 0, CorrelationID: 1, ThrottleTime: 0, Err: nil, ErrMsg: "", CoordinatorID: 1, CoordinatorHost: "host", CoordinatorPort: 33333, } b0, err := respV0.Bytes() if err != nil { t.Fatal(err) } r0, err := ReadVersionedConsumerMetadataResp(bytes.NewReader(b0), respV0.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV0, *r0) { t.Errorf("Expected \n %#+v\n fot \n %#+v\n", respV0, *r0) } respV1 := respV0 respV1.Version = KafkaV1 respV1.ThrottleTime = 10 * time.Second respV1.ErrMsg = "My error" b1, err := respV1.Bytes() if err != nil { t.Fatal(err) } r1, err := ReadVersionedConsumerMetadataResp(bytes.NewReader(b1), respV1.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV1, *r1) { t.Errorf("Expected \n %#+v\n fot \n %#+v\n", respV1, *r1) } } func TestOffsetCommitResponseWithVersions(t *testing.T) { respV0 := OffsetCommitResp{ Version: KafkaV0, CorrelationID: 1, ThrottleTime: 0, Topics: []OffsetCommitRespTopic{ OffsetCommitRespTopic{ Name: "test", Partitions: []OffsetCommitRespPartition{ OffsetCommitRespPartition{ ID: 1, Err: nil, }, }, }, }, } b0, err := respV0.Bytes() if err != nil { t.Fatal(err) } resp0, err := ReadVersionedOffsetCommitResp(bytes.NewReader(b0), respV0.Version) if !reflect.DeepEqual(&respV0, resp0) { t.Fatalf("Not equal %+#v , %+#v", respV0, resp0) } respV3 := respV0 
respV3.Version = KafkaV3 respV3.ThrottleTime = 2 * time.Second b3, err := respV3.Bytes() if err != nil { t.Fatal(err) } resp3, err := ReadVersionedOffsetCommitResp(bytes.NewReader(b3), respV3.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV3, *resp3) { t.Fatalf("Not equal \n%+#v , \n%+#v", respV3, *resp3) } } func TestAPIVersionsResponseWithVersions(t *testing.T) { respV0 := APIVersionsResp{ CorrelationID: 1, APIVersions: []SupportedVersion{ SupportedVersion{ APIKey: 1, MinVersion: 0, MaxVersion: 2, }, }, } b0, err := respV0.Bytes() if err != nil { t.Fatal(err) } resp0, err := ReadVersionedAPIVersionsResp(bytes.NewReader(b0), respV0.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV0, *resp0) { t.Fatalf("Not equal \n %+#v , \n %+#v", respV0, *resp0) } respV1 := respV0 respV1.Version = KafkaV1 respV1.ThrottleTime = 2 * time.Second b1, err := respV1.Bytes() if err != nil { t.Fatal(err) } resp1, err := ReadVersionedAPIVersionsResp(bytes.NewReader(b1), respV1.Version) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(respV1, *resp1) { t.Fatalf("Not equal \n%+#v , \n%+#v", respV1, *resp1) } } func TestSerializeEmptyMessageSet(t *testing.T) { var buf bytes.Buffer messages := []*Message{} n, err := writeMessageSet(&buf, messages, CompressionNone) if err != nil { t.Fatalf("cannot serialize messages: %s", err) } if n != 0 { t.Fatalf("got n=%d result from writeMessageSet; want 0", n) } if l := len(buf.Bytes()); l != 0 { t.Fatalf("got len=%d for empty message set; should be 0", l) } } func TestReadIncompleteMessage(t *testing.T) { var buf bytes.Buffer _, err := writeMessageSet(&buf, []*Message{ {Value: []byte("111111111111111")}, {Value: []byte("222222222222222")}, {Value: []byte("333333333333333")}, }, CompressionNone) if err != nil { t.Fatalf("cannot serialize messages: %s", err) } b := buf.Bytes() // cut off the last bytes as kafka can do b = b[:len(b)-4] messages, err := readMessageSet(bytes.NewBuffer(b), int32(len(b))) if err != nil { t.Fatalf("cannot deserialize messages: %s", err) } if len(messages) != 2 { t.Fatalf("expected 2 messages, got %d", len(messages)) } if messages[0].Value[0] != '1' || messages[1].Value[0] != '2' { t.Fatal("expected different messages content") } } func TestReadEmptyMessage(t *testing.T) { var buf bytes.Buffer enc := NewEncoder(&buf) message := Message{} enc.EncodeInt64(message.Offset) enc.EncodeInt32(0) if err := enc.Err(); err != nil { t.Fatalf("encoding error: %s", err) } b := buf.Bytes() messages, err := readMessageSet(bytes.NewBuffer(b), int32(len(b))) if err != nil { t.Fatalf("cannot deserialize messages: %s", err) } if len(messages) != 0 { t.Fatalf("expected 0 messages, got %d", len(messages)) } } func TestCreateTopics(t *testing.T) { reference := []byte{ 0, 0, 0, 77, // size 0, 19, //kind 0, 0, 0, 0, // version 0, 3, // CorrelationID 0, 0, // ClientID 0, 0, 0, 1, // size of []TopicInfo 0, 5, 't', 'o', 'p', 'i', 'c', // topic 255, 255, 255, 255, // NumPartitions 255, 255, //ReplicationFactor 0, 0, 0, 1, // size of ReplicaAssignments 0, 0, 0, 0, // Partition 0, 0, 0, 3, // size of Replicas 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, // {0, 1, 2} 0, 0, 0, 1, // size of ConfigEntries 0, 12, 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', // "retention.ms" 0, 2, '-', '1', // "-1", 0, 0, 0, 0, // timeout } req := CreateTopicsReq{ Timeout: 0, ValidateOnly: false, } req.correlationID = 3 topicInfo := TopicInfo{ Topic: "topic", NumPartitions: -1, ReplicationFactor: -1, ReplicaAssignments: nil, ConfigEntries: nil, } ra := ReplicaAssignment{ Partition: 0, Replicas: []int32{0, 1, 2}, } topicInfo.ReplicaAssignments = 
[]ReplicaAssignment{ra} ce := ConfigEntry{ ConfigName: "retention.ms", ConfigValue: "-1", } topicInfo.ConfigEntries = []ConfigEntry{ce} req.CreateTopicsRequests = []TopicInfo{topicInfo} b, err := req.Bytes() if err != nil { t.Fatal(err) } if len(b) != len(reference) { t.Errorf("Bytes representation wrong") } for i := range b { if b[i] != reference[i] { t.Fatalf("Bytes representation wrong on %d byte", i) } } req1, err := ReadCreateTopicsReq(bytes.NewBuffer(b)) if err != nil { t.Fatal(err) } req2 := req1.CreateTopicsRequests for i, topic := range req.CreateTopicsRequests { if topic.ReplicationFactor != req2[i].ReplicationFactor { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } if topic.NumPartitions != req2[i].NumPartitions { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } if topic.Topic != req2[i].Topic { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } for j, ce := range topic.ConfigEntries { if ce.ConfigName != req2[i].ConfigEntries[j].ConfigName { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } if ce.ConfigValue != req2[i].ConfigEntries[j].ConfigValue { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } } for k, ra := range topic.ReplicaAssignments { if ra.Partition != req2[i].ReplicaAssignments[k].Partition { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } for l, repl := range ra.Replicas { if repl != req2[i].ReplicaAssignments[k].Replicas[l] { t.Errorf("req1 = %+v req2 = %+v \n", req, req2) } } } } resp := CreateTopicsResp{ Version: 0, CorrelationID: 1, TopicErrors: []TopicError{TopicError{ ErrorCode: 0, Topic: "testtopic", }}, } b1, err := resp.Bytes() if err != nil { t.Fatal(err) } resp2, err := ReadCreateTopicsResp(bytes.NewBuffer(b1)) if err != nil { t.Fatal(err) } if resp.CorrelationID != resp2.CorrelationID { t.Errorf("resp1 = %+v resp2 = %+v \n", resp, resp2) } for i, te := range resp.TopicErrors { if te.ErrorCode != resp2.TopicErrors[i].ErrorCode { t.Errorf("resp1 = %+v resp2 = %+v \n", resp, resp2) } if te.Topic != resp2.TopicErrors[i].Topic { t.Errorf("resp1 = %+v resp2 = %+v \n", resp, resp2) } } } func TestVersionedCreateTopicRequest(t *testing.T) { reqV0 := CreateTopicsReq{ Timeout: 0, ValidateOnly: false, } topicInfo := TopicInfo{ Topic: "topic", NumPartitions: -1, ReplicationFactor: -1, ReplicaAssignments: nil, ConfigEntries: nil, } ra := ReplicaAssignment{ Partition: 0, Replicas: []int32{0, 1, 2}, } topicInfo.ReplicaAssignments = []ReplicaAssignment{ra} ce := ConfigEntry{ ConfigName: "retention.ms", ConfigValue: "-1", } topicInfo.ConfigEntries = []ConfigEntry{ce} reqV0.CreateTopicsRequests = []TopicInfo{topicInfo} b, err := reqV0.Bytes() if err != nil { t.Fatal(err) } respV0, err := ReadCreateTopicsReq(bytes.NewReader(b)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(reqV0, *respV0) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", reqV0, *respV0) } reqV1 := reqV0 reqV1.version = KafkaV1 reqV1.ValidateOnly = true b, err = reqV1.Bytes() if err != nil { t.Fatal(err) } respV1, err := ReadCreateTopicsReq(bytes.NewReader(b)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(reqV1, *respV1) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", reqV1, *respV1) } reqV2 := reqV0 reqV2.version = KafkaV2 reqV2.ValidateOnly = true b, err = reqV2.Bytes() if err != nil { t.Fatal(err) } respV2, err := ReadCreateTopicsReq(bytes.NewReader(b)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(reqV2, *respV2) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", reqV2, *respV2) } } func TestVersionedCreateTopicResponse(t 
*testing.T) { origRespV0 := CreateTopicsResp{ CorrelationID: 0, Version: KafkaV0, TopicErrors: []TopicError{ TopicError{ ErrorCode: 1, Topic: "mytopic", Err: ErrOffsetOutOfRange, }, }, } b, err := origRespV0.Bytes() if err != nil { t.Fatal(err) } respV0, err := ReadVersionedCreateTopicsResp(bytes.NewReader(b), KafkaV0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(origRespV0, *respV0) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", origRespV0, *respV0) } origRespV1 := origRespV0 origRespV1.TopicErrors[0].ErrorMessage = "Error!" origRespV1.Version = KafkaV1 b, err = origRespV1.Bytes() if err != nil { t.Fatal(err) } respV1, err := ReadVersionedCreateTopicsResp(bytes.NewReader(b), KafkaV1) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(origRespV1, *respV1) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", origRespV1, *respV1) } origRespV2 := origRespV1 origRespV2.ThrottleTime = 5 * time.Second origRespV2.Version = KafkaV2 b, err = origRespV2.Bytes() if err != nil { t.Fatal(err) } respV2, err := ReadVersionedCreateTopicsResp(bytes.NewReader(b), KafkaV2) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(origRespV2, *respV2) { t.Fatalf("Responses are not equal: expect \n %#+v got \n %#+v", origRespV2, *respV2) } } func BenchmarkProduceRequestMarshal(b *testing.B) { messages := make([]*Message, 100) for i := range messages { messages[i] = &Message{ Offset: int64(i), Crc: uint32(i), Key: nil, Value: []byte(`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur.`), } } req := &ProduceReq{ RequestHeader: RequestHeader{correlationID: 241, ClientID: "test"}, Compression: CompressionNone, RequiredAcks: RequiredAcksAll, Timeout: time.Second, Topics: []ProduceReqTopic{ { Name: "foo", Partitions: []ProduceReqPartition{ { ID: 0, Messages: messages, }, }, }, }, } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := req.Bytes(); err != nil { b.Fatalf("could not serialize messages: %s", err) } } } func BenchmarkProduceResponseUnmarshal(b *testing.B) { resp := &ProduceResp{ CorrelationID: 241, Topics: []ProduceRespTopic{ { Name: "foo", Partitions: []ProduceRespPartition{ { ID: 0, Err: error(nil), Offset: 1, }, }, }, }, } raw, err := resp.Bytes() if err != nil { b.Fatalf("cannot serialize response: %s", err) } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := ReadVersionedProduceResp(bytes.NewBuffer(raw), resp.Version); err != nil { b.Fatalf("could not deserialize messages: %s", err) } } } func BenchmarkFetchRequestMarshal(b *testing.B) { req := &FetchReq{ RequestHeader: RequestHeader{correlationID: 241, ClientID: "test"}, MaxWaitTime: time.Second * 2, MinBytes: 12454, Topics: []FetchReqTopic{ { Name: "foo", Partitions: []FetchReqPartition{ {ID: 421, FetchOffset: 529, MaxBytes: 4921}, {ID: 0, FetchOffset: 11, MaxBytes: 92}, }, }, }, } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := req.Bytes(); err != nil { b.Fatalf("could not serialize messages: %s", err) } } } func BenchmarkFetchResponseUnmarshal(b *testing.B) { messages := make([]*Message, 100) for i := range messages { messages[i] = &Message{ Offset: int64(i), Key: nil, Value: []byte(`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. 
Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur.`), } } resp := &FetchResp{ CorrelationID: 241, Topics: []FetchRespTopic{ { Name: "foo", Partitions: []FetchRespPartition{ { ID: 0, TipOffset: 444, Messages: messages, }, { ID: 123, Err: ErrBrokerNotAvailable, TipOffset: -1, Messages: []*Message{}, }, }, }, }, } raw, err := resp.Bytes() if err != nil { b.Fatalf("cannot serialize response: %s", err) } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := ReadVersionedFetchResp(bytes.NewBuffer(raw), KafkaV0); err != nil { b.Fatalf("could not deserialize messages: %s", err) } } } func BenchmarkFetchResponseV5Unmarshal(b *testing.B) { data := []byte{ 0x00, 0x00, 0x02, 0x98, // size 0x00, 0x00, 0x00, 0xa7, // correlation id 0x00, 0x00, 0x00, 0x00, // throttle time 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x0c, // string length 0x6c, 0x65, 0x67, 0x61, 0x6c, 0x2d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, // topic name 0x00, 0x00, 0x00, 0x01, // array size 0x00, 0x00, 0x00, 0x00, // partition id 0x00, 0x00, // error 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // tip offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, // last stable offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // log start offset 0xff, 0xff, 0xff, 0xff, // array length 0x00, 0x00, 0x02, 0x54, // message set size // Batch 1: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, // first offset 0x00, 0x00, 0x01, 0x1e, // length 0x00, 0x00, 0x00, 0x04, // partition leader epoch 0x02, // message version 0xbc, 0x45, 0x46, 0x64, // crc 0x00, 0x00, // attributes 0x00, 0x00, 0x00, 0x00, // last offset delta 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // first timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // max timestamp 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // producer id 0xff, 0xff, // producer epoch 0xff, 0xff, 0xff, 0xff, // first sequence 0x00, 0x00, 0x00, 0x01, // array length 0xd6, 0x03, // record length 0x00, // attributes 0x00, // timestamp delta 0x00, // offset delta 0x10, // key length 0x08, 0x80, 0x82, 0x94, 0x09, 0x10, 0xf7, 0x07, // key 0xb8, 0x03, // value length (220) 0x01, 0x01, 0x50, 0x12, 0x04, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, // value... 
0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0xba, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x0a, 0x18, 0x76, 0x6a, 0x67, 0x65, 0x72, 0x6d, 0x73, 0x34, 0x32, 0x68, 0x6b, 0x35, 0x6f, 0x63, 0x74, 0x37, 0x70, 0x66, 0x33, 0x7a, 0x79, 0x35, 0x74, 0x34, 0x12, 0x07, 0x62, 0x30, 0x34, 0x39, 0x38, 0x65, 0x34, 0x18, 0xc3, 0xf6, 0xb0, 0xc2, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x22, 0x15, 0x0a, 0x08, 0x44, 0x45, 0x41, 0x44, 0x42, 0x45, 0x45, 0x46, 0x12, 0x09, 0x68, 0x74, 0x74, 0x70, 0x2d, 0x66, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x77, 0x70, 0x77, 0x6a, 0x36, 0x61, 0x62, 0x6c, 0x75, 0x7a, 0x72, 0x62, 0x7a, 0x64, 0x72, 0x36, 0x6f, 0x62, 0x67, 0x6f, 0x62, 0x7a, 0x6d, 0x35, 0x1a, 0x16, 0x44, 0x45, 0x38, 0x39, 0x33, 0x37, 0x30, 0x34, 0x30, 0x30, 0x34, 0x34, 0x30, 0x35, 0x33, 0x32, 0x30, 0x31, 0x33, 0x30, 0x30, 0x30, 0x20, 0x91, 0xd1, 0xb2, 0xc0, 0x9e, 0x8f, 0x90, 0xcd, 0x15, 0x28, 0x91, 0xd1, 0xfa, 0xb7, 0x9a, 0x9c, 0xa6, 0x85, 0x16, 0x32, 0x12, 0x44, 0x45, 0x35, 0x39, 0x5a, 0x5a, 0x5a, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35, 0x32, 0x35, 0x34, 0x39, 0x33, 0x3a, 0x18, 0x73, 0x76, 0x7a, 0x76, 0x6c, 0x63, 0x73, 0x79, 0x71, 0x6f, 0x64, 0x76, 0x73, 0x6f, 0x61, 0x65, 0x68, 0x75, 0x6e, 0x6b, 0x69, 0x6c, 0x69, 0x6d, // ...value 0x00, // headers length } r := bytes.NewReader(data) b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { r.Reset(data) if _, err := ReadVersionedFetchResp(r, 5); err != nil { b.Fatalf("could not deserialize messages: %s", err) } } } kafka-2.1.1/v2/proto/serialization.go000066400000000000000000000154021356004474300174760ustar00rootroot00000000000000package proto import ( "encoding/binary" "errors" "fmt" "io" "time" ) const ( maxParseArrayLen = 256 ) var ErrNotEnoughData = errors.New("not enough data") var ErrInvalidArrayLen = errors.New("invalid array length") type decoder struct { buf []byte r io.Reader err error } func NewDecoder(r io.Reader) *decoder { return &decoder{ r: r, buf: make([]byte, 1024), } } func (d *decoder) SetReader(r io.Reader) { d.r = r } func (d *decoder) DecodeInt8() int8 { if d.err != nil { return 0 } b := d.buf[:1] n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return 0 } if n != 1 { d.err = ErrNotEnoughData return 0 } return int8(b[0]) } func (d *decoder) DecodeInt16() int16 { if d.err != nil { return 0 } b := d.buf[:2] n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return 0 } if n != 2 { d.err = ErrNotEnoughData return 0 } return int16(binary.BigEndian.Uint16(b)) } func (d *decoder) DecodeInt32() int32 { if d.err != nil { return 0 } b := d.buf[:4] n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return 0 } if n != 4 { d.err = ErrNotEnoughData return 0 } return int32(binary.BigEndian.Uint32(b)) } func (d *decoder) DecodeUint32() uint32 { if d.err != nil { return 0 } b := d.buf[:4] n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return 0 } if n != 4 { d.err = ErrNotEnoughData return 0 } return binary.BigEndian.Uint32(b) } func (d *decoder) DecodeInt64() int64 { if d.err != nil { return 0 } b := d.buf[:8] n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return 0 } if n != 8 { d.err = ErrNotEnoughData return 0 } return int64(binary.BigEndian.Uint64(b)) } func (d *decoder) DecodeDuration32() time.Duration { return time.Duration(d.DecodeInt32()) * time.Millisecond } func (d *decoder) DecodeString() string { if d.err != nil { return "" } slen := d.DecodeInt16() if d.err != nil { return "" } if slen < 1 { return "" } var b []byte if int(slen) > len(d.buf) { var err error b, err = allocParseBuf(int(slen)) 
if err != nil { d.err = err return "" } } else { b = d.buf[:int(slen)] } n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return "" } if n != int(slen) { d.err = ErrNotEnoughData return "" } return string(b) } func (d *decoder) DecodeArrayLen() (int, error) { len := int(d.DecodeInt32()) // Sometime kafka may send -1 as size of array. if len == -1 { return 0, nil } if len > maxParseBufSize { return 0, ErrInvalidArrayLen } return len, nil } func (d *decoder) DecodeBytes() []byte { if d.err != nil { return nil } slen := d.DecodeInt32() if d.err != nil { return nil } if slen < 1 { return nil } b, err := allocParseBuf(int(slen)) if err != nil { d.err = err return nil } n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return nil } if n != int(slen) { d.err = ErrNotEnoughData return nil } return b } func (d *decoder) DecodeVarInt() int64 { // err already stored by ReadByte(): res, _ := binary.ReadVarint(d) return res } // ReadByte implements ByteReader func (d *decoder) ReadByte() (byte, error) { _, err := io.ReadFull(d.r, d.buf[:1]) if err != nil { d.err = err } return d.buf[0], err } func (d *decoder) DecodeVarBytes() []byte { slen := d.DecodeVarInt() if slen < 1 { return nil } b, err := allocParseBuf(int(slen)) if err != nil { d.err = err return nil } n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return nil } if n != int(slen) { d.err = ErrNotEnoughData return nil } return b } func (d *decoder) DecodeVarString() string { if d.err != nil { return "" } slen := d.DecodeVarInt() if d.err != nil { return "" } if slen < 1 { return "" } var b []byte if int(slen) > len(d.buf) { var err error b, err = allocParseBuf(int(slen)) if err != nil { d.err = err return "" } } else { b = d.buf[:int(slen)] } n, err := io.ReadFull(d.r, b) if err != nil { d.err = err return "" } if n != int(slen) { d.err = ErrNotEnoughData return "" } return string(b) } func (d *decoder) Err() error { return d.err } type encoder struct { w io.Writer err error buf [8]byte } func NewEncoder(w io.Writer) *encoder { return &encoder{w: w} } func (e *encoder) EncodeDuration(val time.Duration) { if e.err != nil { return } intVal := uint32(val / time.Millisecond) b := e.buf[:4] binary.BigEndian.PutUint32(b, intVal) _, e.err = e.w.Write(b) } func (e *encoder) EncodeInt8(val int8) { if e.err != nil { return } b := e.buf[:1] b[0] = byte(val) _, e.err = e.w.Write(b) } func (e *encoder) EncodeInt16(val int16) { if e.err != nil { return } b := e.buf[:2] binary.BigEndian.PutUint16(b, uint16(val)) e.err = writeAll(e.w, b) } func (e *encoder) EncodeInt32(val int32) { if e.err != nil { return } b := e.buf[:4] binary.BigEndian.PutUint32(b, uint32(val)) e.err = writeAll(e.w, b) } func (e *encoder) EncodeInt32s(val []int32) { if e.err != nil { return } e.EncodeArrayLen(len(val)) for _, v := range val { e.EncodeInt32(v) } } func (e *encoder) EncodeInt64(val int64) { if e.err != nil { return } b := e.buf[:8] binary.BigEndian.PutUint64(b, uint64(val)) e.err = writeAll(e.w, b) } func (e *encoder) EncodeInt64s(val []int64) { if e.err != nil { return } e.EncodeArrayLen(len(val)) for _, v := range val { e.EncodeInt64(v) } } func (e *encoder) EncodeUint32(val uint32) { if e.err != nil { return } b := e.buf[:4] binary.BigEndian.PutUint32(b, val) e.err = writeAll(e.w, b) } func (e *encoder) EncodeBytes(val []byte) { if e.err != nil { return } buf := e.buf[:4] if val == nil { no := int32(-1) binary.BigEndian.PutUint32(buf, uint32(no)) e.err = writeAll(e.w, buf) return } binary.BigEndian.PutUint32(buf, uint32(len(val))) e.err = 
writeAll(e.w, buf) if e.err == nil { e.err = writeAll(e.w, val) } } func (e *encoder) EncodeString(val string) { if e.err != nil { return } buf := e.buf[:2] binary.BigEndian.PutUint16(buf, uint16(len(val))) e.err = writeAll(e.w, buf) if e.err == nil { e.err = writeAll(e.w, []byte(val)) } } func (e *encoder) EncodeError(err error) { b := e.buf[:2] if err == nil { binary.BigEndian.PutUint16(b, uint16(0)) e.err = writeAll(e.w, b) return } kerr, ok := err.(*KafkaError) if !ok { e.err = fmt.Errorf("cannot encode error of type %T", err) } binary.BigEndian.PutUint16(b, uint16(kerr.errno)) e.err = writeAll(e.w, b) } func (e *encoder) EncodeArrayLen(length int) { e.EncodeInt32(int32(length)) } func (e *encoder) Err() error { return e.err } func writeAll(w io.Writer, b []byte) error { n, err := w.Write(b) if err != nil { return err } if n != len(b) { return fmt.Errorf("cannot write %d: %d written", len(b), n) } return nil } kafka-2.1.1/v2/proto/serialization_test.go000066400000000000000000000054101356004474300205330ustar00rootroot00000000000000package proto import ( "bytes" "testing" ) var ( keyint = 12 bint8 = []byte{0x0c} bint64 = []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c} keystr = "Ash nazg durbatulûk, ash nazg gimbatul, ash nazg thrakatulûk, agh burzum-ishi krimpatul" // google LOTR one ring bstr = []byte{0x00, 0x59, 0x41, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x64, 0x75, 0x72, 0x62, 0x61, 0x74, 0x75, 0x6c, 0xc3, 0xbb, 0x6b, 0x2c, 0x20, 0x61, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x67, 0x69, 0x6d, 0x62, 0x61, 0x74, 0x75, 0x6c, 0x2c, 0x20, 0x61, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x74, 0x68, 0x72, 0x61, 0x6b, 0x61, 0x74, 0x75, 0x6c, 0xc3, 0xbb, 0x6b, 0x2c, 0x20, 0x61, 0x67, 0x68, 0x20, 0x62, 0x75, 0x72, 0x7a, 0x75, 0x6d, 0x2d, 0x69, 0x73, 0x68, 0x69, 0x20, 0x6b, 0x72, 0x69, 0x6d, 0x70, 0x61, 0x74, 0x75, 0x6c} bbyte = []byte{0x00, 0x00, 0x00, 0x59, 0x41, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x64, 0x75, 0x72, 0x62, 0x61, 0x74, 0x75, 0x6c, 0xc3, 0xbb, 0x6b, 0x2c, 0x20, 0x61, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x67, 0x69, 0x6d, 0x62, 0x61, 0x74, 0x75, 0x6c, 0x2c, 0x20, 0x61, 0x73, 0x68, 0x20, 0x6e, 0x61, 0x7a, 0x67, 0x20, 0x74, 0x68, 0x72, 0x61, 0x6b, 0x61, 0x74, 0x75, 0x6c, 0xc3, 0xbb, 0x6b, 0x2c, 0x20, 0x61, 0x67, 0x68, 0x20, 0x62, 0x75, 0x72, 0x7a, 0x75, 0x6d, 0x2d, 0x69, 0x73, 0x68, 0x69, 0x20, 0x6b, 0x72, 0x69, 0x6d, 0x70, 0x61, 0x74, 0x75, 0x6c} ) var b = bytes.NewBuffer(nil) func getTestEncoder() *encoder { b.Reset() return NewEncoder(b) } func TestEncoder(t *testing.T) { e := getTestEncoder() e.EncodeInt8(int8(keyint)) if !bytes.Equal(b.Bytes(), bint8) { t.Fatalf("bytes are not the same % x != % x", b.Bytes(), bint8) } e = getTestEncoder() e.EncodeInt64(int64(keyint)) if !bytes.Equal(b.Bytes(), bint64) { t.Fatalf("bytes are not the same % x != % x", b.Bytes(), bint64) } e = getTestEncoder() e.EncodeString(string(keystr)) if !bytes.Equal(b.Bytes(), bstr) { t.Fatalf("bytes are not the same % x != % x", b.Bytes(), bstr) } } func TestDecoder(t *testing.T) { d := NewDecoder(bytes.NewBuffer(bint8)) if d.DecodeInt8() != int8(keyint) { t.Fatalf("int8 decoding failed") } d = NewDecoder(bytes.NewBuffer(bint64)) if d.DecodeInt64() != int64(keyint) { t.Fatalf("int64 decoding failed") } d = NewDecoder(bytes.NewBuffer(bstr)) if d.DecodeString() != keystr { t.Fatalf("string decoding failed") } d = NewDecoder(bytes.NewBuffer(bbyte)) if !bytes.Equal(d.DecodeBytes(), []byte(keystr)) { t.Fatalf("bytes are not the same") } } func 
BenchmarkReadVarint(b *testing.B) { data := []byte{0x10} r := bytes.NewReader(data) dec := NewDecoder(r) b.ReportAllocs() for i := 0; i < b.N; i++ { r.Reset(data) x := dec.DecodeVarInt() if x != 8 { b.Fatalf("unexpected value decoded: %d", x) } } } kafka-2.1.1/v2/proto/snappy.go000066400000000000000000000025501356004474300161330ustar00rootroot00000000000000package proto import ( "bytes" "encoding/binary" "fmt" "github.com/golang/snappy" ) // Snappy-encoded messages from the official Java client are encoded using // snappy-java: see github.com/xerial/snappy-java. // This does its own non-standard framing. We can detect this encoding // by sniffing its special header. // // That library will still read plain (unframed) snappy-encoded messages, // so we don't need to implement that codec on the compression side. // // (This is the same behavior as several of the other popular Kafka clients.) var snappyJavaMagic = []byte("\x82SNAPPY\x00") func snappyDecode(b []byte) ([]byte, error) { if !bytes.HasPrefix(b, snappyJavaMagic) { return snappy.Decode(nil, b) } // See https://github.com/xerial/snappy-java/blob/develop/src/main/java/org/xerial/snappy/SnappyInputStream.java version := binary.BigEndian.Uint32(b[8:12]) if version != 1 { return nil, fmt.Errorf("cannot handle snappy-java codec version other than 1 (got %d)", version) } // b[12:16] is the "compatible version"; ignore for now var ( decoded = make([]byte, 0, len(b)) chunk []byte err error ) for i := 16; i < len(b); { n := int(binary.BigEndian.Uint32(b[i : i+4])) i += 4 chunk, err = snappy.Decode(chunk, b[i:i+n]) if err != nil { return nil, err } i += n decoded = append(decoded, chunk...) } return decoded, nil } kafka-2.1.1/v2/proto/snappy_test.go000066400000000000000000000014741356004474300171760ustar00rootroot00000000000000package proto import ( "bytes" "testing" ) var snappyChunk = []byte("\x03\x08foo") // snappy encoding of "foo" func TestSnappyDecodeNormal(t *testing.T) { got, err := snappyDecode(snappyChunk) if err != nil { t.Fatal(err) } if want := []byte("foo"); !bytes.Equal(got, want) { t.Fatalf("got: %v; want: %v", got, want) } } func TestSnappyDecodeJava(t *testing.T) { javafied := []byte{ 0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0x0, // magic 0, 0, 0, 1, // version 0, 0, 0, 1, // compatible version 0, 0, 0, 5, // chunk size 0x3, 0x8, 'f', 'o', 'o', // chunk data 0, 0, 0, 5, // chunk size 0x3, 0x8, 'f', 'o', 'o', // chunk data } got, err := snappyDecode(javafied) if err != nil { t.Fatal(err) } if want := []byte("foofoo"); !bytes.Equal(got, want) { t.Fatalf("got: %v; want: %v", got, want) } } kafka-2.1.1/v2/proto/utils.go000066400000000000000000000006731356004474300157650ustar00rootroot00000000000000package proto import ( "fmt" "math" ) const ( maxParseBufSize = math.MaxInt32 ) func messageSizeError(size int) error { return fmt.Errorf("unreasonable message/block size %d (max:%d)", size, maxParseBufSize) } // allocParseBuf is used to allocate buffers used for parsing func allocParseBuf(size int) ([]byte, error) { if size < 0 || size > maxParseBufSize { return nil, messageSizeError(size) } return make([]byte, size), nil } kafka-2.1.1/v2/server_test.go000066400000000000000000000156301356004474300160260ustar00rootroot00000000000000package kafka import ( "bytes" "fmt" "net" "strconv" "sync" "time" "github.com/optiopay/kafka/v2/proto" ) const ( AnyRequest = -1 ) type Serializable interface { Bytes() ([]byte, error) } type RequestHandler func(request Serializable) (response Serializable) type Server struct { Processed int mu sync.RWMutex ln 
net.Listener clients map[int64]net.Conn handlers map[int16]RequestHandler } func NewServer() *Server { srv := &Server{ clients: make(map[int64]net.Conn), handlers: make(map[int16]RequestHandler), } srv.handlers[AnyRequest] = srv.defaultRequestHandler return srv } // Handle registers handler for given message kind. Handler registered with // AnyRequest kind will be used only if there is no precise handler for the // kind. func (srv *Server) Handle(reqKind int16, handler RequestHandler) { srv.mu.Lock() srv.handlers[reqKind] = handler srv.mu.Unlock() } func (srv *Server) Address() string { return srv.ln.Addr().String() } func (srv *Server) HostPort() (string, int) { host, sport, err := net.SplitHostPort(srv.ln.Addr().String()) if err != nil { panic(fmt.Sprintf("cannot split server address: %s", err)) } port, err := strconv.Atoi(sport) if err != nil { panic(fmt.Sprintf("port '%s' is not a number: %s", sport, err)) } if host == "" { host = "localhost" } return host, port } func (srv *Server) Start() { srv.mu.Lock() defer srv.mu.Unlock() if srv.ln != nil { panic("server already started") } ln, err := net.Listen("tcp4", "") if err != nil { panic(fmt.Sprintf("cannot start server: %s", err)) } srv.ln = ln go func() { for { client, err := ln.Accept() if err != nil { return } go srv.handleClient(client) } }() } func (srv *Server) Close() { srv.mu.Lock() _ = srv.ln.Close() for _, cli := range srv.clients { _ = cli.Close() } srv.clients = make(map[int64]net.Conn) srv.mu.Unlock() } func (srv *Server) handleClient(c net.Conn) { clientID := time.Now().UnixNano() srv.mu.Lock() srv.clients[clientID] = c srv.mu.Unlock() defer func() { srv.mu.Lock() delete(srv.clients, clientID) srv.mu.Unlock() }() for { kind, b, err := proto.ReadReq(c) if err != nil { return } srv.mu.RLock() fn, ok := srv.handlers[kind] if !ok { fn, ok = srv.handlers[AnyRequest] } srv.mu.RUnlock() if !ok { panic(fmt.Sprintf("no handler for %d", kind)) } var request Serializable switch kind { case proto.FetchReqKind: request, err = proto.ReadFetchReq(bytes.NewBuffer(b)) case proto.ProduceReqKind: request, err = proto.ReadProduceReq(bytes.NewBuffer(b)) case proto.OffsetReqKind: request, err = proto.ReadOffsetReq(bytes.NewBuffer(b)) case proto.MetadataReqKind: request, err = proto.ReadMetadataReq(bytes.NewBuffer(b)) case proto.ConsumerMetadataReqKind: request, err = proto.ReadConsumerMetadataReq(bytes.NewBuffer(b)) case proto.OffsetCommitReqKind: request, err = proto.ReadOffsetCommitReq(bytes.NewBuffer(b)) case proto.OffsetFetchReqKind: request, err = proto.ReadOffsetFetchReq(bytes.NewBuffer(b)) case proto.APIVersionsReqKind: request, err = proto.ReadAPIVersionsReq(bytes.NewBuffer(b)) } if err != nil { panic(fmt.Sprintf("could not read message %d: %s", kind, err)) } response := fn(request) if response != nil { b, err := response.Bytes() if err != nil { panic(fmt.Sprintf("cannot serialize %T: %s", response, err)) } if _, err := c.Write(b); err != nil { panic(fmt.Sprintf("cannot write to client: %s", err)) } } } } func (srv *Server) defaultRequestHandler(request Serializable) Serializable { srv.mu.Lock() defer srv.mu.Unlock() srv.Processed++ switch req := request.(type) { case *proto.FetchReq: resp := &proto.FetchResp{ CorrelationID: req.GetCorrelationID(), Topics: make([]proto.FetchRespTopic, len(req.Topics)), } for ti, topic := range req.Topics { resp.Topics[ti] = proto.FetchRespTopic{ Name: topic.Name, Partitions: make([]proto.FetchRespPartition, len(topic.Partitions)), } for pi, part := range topic.Partitions { 
resp.Topics[ti].Partitions[pi] = proto.FetchRespPartition{ ID: part.ID, Err: proto.ErrUnknownTopicOrPartition, TipOffset: -1, Messages: []*proto.Message{}, } } } return resp case *proto.ProduceReq: resp := &proto.ProduceResp{ CorrelationID: req.GetCorrelationID(), } resp.Topics = make([]proto.ProduceRespTopic, len(req.Topics)) for ti, topic := range req.Topics { resp.Topics[ti] = proto.ProduceRespTopic{ Name: topic.Name, Partitions: make([]proto.ProduceRespPartition, len(topic.Partitions)), } for pi, part := range topic.Partitions { resp.Topics[ti].Partitions[pi] = proto.ProduceRespPartition{ ID: part.ID, Err: proto.ErrUnknownTopicOrPartition, Offset: -1, } } } return resp case *proto.OffsetReq: topics := make([]proto.OffsetRespTopic, len(req.Topics)) for ti := range req.Topics { var topic = &topics[ti] topic.Name = req.Topics[ti].Name topic.Partitions = make([]proto.OffsetRespPartition, len(req.Topics[ti].Partitions)) for pi := range topic.Partitions { var part = &topic.Partitions[pi] part.ID = req.Topics[ti].Partitions[pi].ID part.Err = proto.ErrUnknownTopicOrPartition } } return &proto.OffsetResp{ CorrelationID: req.GetCorrelationID(), Topics: topics, } case *proto.MetadataReq: host, sport, err := net.SplitHostPort(srv.ln.Addr().String()) if err != nil { panic(fmt.Sprintf("cannot split server address: %s", err)) } port, err := strconv.Atoi(sport) if err != nil { panic(fmt.Sprintf("port '%s' is not a number: %s", sport, err)) } if host == "" { host = "localhost" } return &proto.MetadataResp{ CorrelationID: req.GetCorrelationID(), Brokers: []proto.MetadataRespBroker{ {NodeID: 1, Host: host, Port: int32(port)}, }, Topics: []proto.MetadataRespTopic{}, } case *proto.ConsumerMetadataReq: panic("not implemented") case *proto.OffsetCommitReq: panic("not implemented") case *proto.OffsetFetchReq: panic("not implemented") case *proto.APIVersionsReq: return &proto.APIVersionsResp{ CorrelationID: req.GetCorrelationID(), APIVersions: []proto.SupportedVersion{ proto.SupportedVersion{APIKey: proto.ProduceReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.FetchReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.OffsetReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.MetadataReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.OffsetCommitReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.OffsetFetchReqKind, MinVersion: 0, MaxVersion: 0}, proto.SupportedVersion{APIKey: proto.ConsumerMetadataReqKind, MinVersion: 0, MaxVersion: 0}, }, } default: panic(fmt.Sprintf("unknown message type: %T", req)) } }
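The mock Server above resolves each incoming request kind against its handler registry and only falls back to defaultRequestHandler when nothing more specific is registered. The sketch below illustrates how a test might wire that up; it is not part of the original test file, and the test name, the produce-only handler, and the canned offset value 42 are illustrative assumptions.

```go
package kafka

import (
	"testing"

	"github.com/optiopay/kafka/v2/proto"
)

// TestProduceAgainstMockServer is a hypothetical example showing the intended
// usage pattern of the test Server defined in this file.
func TestProduceAgainstMockServer(t *testing.T) {
	srv := NewServer()
	srv.Start()
	defer srv.Close()

	// Acknowledge every produce request with offset 42 (arbitrary value);
	// all other request kinds still fall through to defaultRequestHandler.
	srv.Handle(proto.ProduceReqKind, func(request Serializable) Serializable {
		req := request.(*proto.ProduceReq)
		resp := &proto.ProduceResp{
			CorrelationID: req.GetCorrelationID(),
			Topics:        make([]proto.ProduceRespTopic, len(req.Topics)),
		}
		for ti, topic := range req.Topics {
			resp.Topics[ti] = proto.ProduceRespTopic{
				Name:       topic.Name,
				Partitions: make([]proto.ProduceRespPartition, len(topic.Partitions)),
			}
			for pi, part := range topic.Partitions {
				resp.Topics[ti].Partitions[pi] = proto.ProduceRespPartition{
					ID:     part.ID,
					Offset: 42,
				}
			}
		}
		return resp
	})

	// A client dialed against srv.Address() would now observe offset 42 for
	// every produced message; the dialing side is omitted from this sketch.
	if srv.Address() == "" {
		t.Fatal("server did not report an address")
	}
}
```

Returning responses through the Serializable interface is what lets one handler registry serve every request kind: the server only needs Bytes() to write whatever proto response the handler built back to the client connection.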