lua-resty-kafka-0.05/.gitignore
t/servroot

lua-resty-kafka-0.05/LICENSE
Copyright (c) 2014, doujiang
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of lua-resty-kafka nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

lua-resty-kafka-0.05/Makefile
OPENRESTY_PREFIX=/opt/openresty

PREFIX ?=          /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?=     $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install

.PHONY: all test install

all: ;

install: all
	$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/kafka
	$(INSTALL) lib/resty/kafka/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/kafka

test: all
	PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../../test-nginx/lib -r t/

lua-resty-kafka-0.05/README.md
lua-resty-kafka
===============

Lua Kafka client driver for ngx_lua, based on the cosocket API.

Status
======

This library is under early development and should be considered experimental.

Description
===========

This Lua library is a Kafka client driver for the ngx_lua nginx module:

http://wiki.nginx.org/HttpLuaModule

This Lua library takes advantage of ngx_lua's cosocket API, which ensures
100% nonblocking behavior.

Note that at least [ngx_lua 0.9.3](https://github.com/openresty/lua-nginx-module/tags) or [ngx_openresty 1.4.3.7](http://openresty.org/#Download) is required, and only LuaJIT is supported (`--with-luajit`).
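
If you are not sure whether your build meets these requirements, the following runtime sanity check may help. This is a minimal sketch, assuming a standard OpenResty build; `ngx.config.ngx_lua_version` encodes ngx_lua 0.9.3 as the number `9003`.

```nginx
    # nginx.conf (sketch): fail fast if the runtime is too old or not LuaJIT
    init_by_lua '
        if not jit then
            error("lua-resty-kafka requires LuaJIT (build with --with-luajit)")
        end
        local v = ngx.config.ngx_lua_version
        if not v or v < 9003 then  -- 9003 == ngx_lua 0.9.3
            error("lua-resty-kafka requires ngx_lua 0.9.3 or newer")
        end
    ';
```
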
Synopsis
========

```lua
    lua_package_path "/path/to/lua-resty-kafka/lib/?.lua;;";

    server {
        location /test {
            content_by_lua '
                local cjson = require "cjson"
                local client = require "resty.kafka.client"
                local producer = require "resty.kafka.producer"

                local broker_list = {
                    { host = "127.0.0.1", port = 9092 },
                }

                local key = "key"
                local message = "halo world"

                -- usually we do not use the client module directly
                local cli = client:new(broker_list)

                local brokers, partitions = cli:fetch_metadata("test")
                if not brokers then
                    ngx.say("fetch_metadata failed, err:", partitions)
                end
                ngx.say("brokers: ", cjson.encode(brokers), "; partitions: ", cjson.encode(partitions))

                -- sync producer_type
                local p = producer:new(broker_list)

                local offset, err = p:send("test", key, message)
                if not offset then
                    ngx.say("send err:", err)
                    return
                end
                ngx.say("send success, offset: ", tonumber(offset))

                -- this is the async producer_type; bp will be reused across the whole nginx worker
                local bp = producer:new(broker_list, { producer_type = "async" })

                local ok, err = bp:send("test", key, message)
                if not ok then
                    ngx.say("send err:", err)
                    return
                end

                ngx.say("send success, ok: ", ok)
            ';
        }
    }
```

Modules
=======

resty.kafka.client
----------------------

To load this module, just do this

```lua
    local client = require "resty.kafka.client"
```

### Methods

#### new

`syntax: cli = client:new(broker_list, client_config)`

The `broker_list` is a list of brokers, like the one below

```json
    [
        {
            "host": "127.0.0.1",
            "port": 9092
        }
    ]
```

An optional `client_config` table can be specified. The following options are supported:

* `socket_timeout`

    Specifies the network timeout threshold in milliseconds. *SHOULD* be larger than the `request_timeout`.

* `keepalive_timeout`

    Specifies the maximal idle timeout (in milliseconds) for the keepalive connection.

* `keepalive_size`

    Specifies the maximal number of connections allowed in the connection pool per Nginx worker.

* `refresh_interval`

    Specifies the interval (in milliseconds) at which to auto-refresh the metadata. The metadata will not be auto-refreshed if this is nil.

#### fetch_metadata

`syntax: brokers, partitions = client:fetch_metadata(topic)`

In case of success, returns all brokers and the partitions of the `topic`.
In case of errors, returns `nil` with a string describing the error.

#### refresh

`syntax: brokers, partitions = client:refresh()`

This refreshes the metadata of all topics that have been fetched by `fetch_metadata`.
In case of success, returns all brokers and the partitions of all those topics.
In case of errors, returns `nil` with a string describing the error.

resty.kafka.producer
----------------------

To load this module, just do this

```lua
    local producer = require "resty.kafka.producer"
```

### Methods

#### new

`syntax: p = producer:new(broker_list, producer_config)`

It is recommended to use the async `producer_type`.

`broker_list` is the same as in `client`.

An optional `producer_config` table can be specified. The following options are supported:

`socket_timeout`, `keepalive_timeout`, `keepalive_size` and `refresh_interval` are the same as in `client_config`.

producer config (named after the corresponding options of the official Kafka producer):

* `producer_type`

    Specifies the `producer.type`: "async" or "sync".

* `request_timeout`

    Specifies the `request.timeout.ms`. Default `2000` (ms).

* `required_acks`

    Specifies the `request.required.acks`, *SHOULD NOT* be zero. Default `1`.

* `max_retry`

    Specifies the `message.send.max.retries`. Default `3`.

* `retry_backoff`

    Specifies the `retry.backoff.ms`. Default `100` (ms).

* `partitioner`

    Specifies the partitioner used to choose a partition from the key and the number of partitions.
    `syntax: partitioner = function (key, partition_num, correlation_id) end`,
    where `correlation_id` is an auto-incrementing id in the producer. The default partitioner is:

```lua
local function default_partitioner(key, num, correlation_id)
    local id = key and crc32(key) or correlation_id

    -- partition_id is continuous and starts from 0
    return id % num
end
```

buffer config (only takes effect when `producer_type` = "async"):

* `flush_time`

    Specifies the `queue.buffering.max.ms`. Default `1000` (ms).

* `batch_num`

    Specifies the `batch.num.messages`. Default `200`.

* `batch_size`

    Specifies the `send.buffer.bytes`. Default `1M` (may reach 2M).
    Be careful: it *SHOULD* be smaller than the `socket.request.max.bytes / 2 - 10k` config in the kafka server.

* `max_buffering`

    Specifies the `queue.buffering.max.messages`. Default `50,000`.

* `error_handle`

    Specifies the error handler, which handles the buffered data when sending to kafka fails.
    `syntax: error_handle = function (topic, partition_id, message_queue, index, err, retryable) end`.
    The failed messages in the `message_queue` are laid out like ```{ key1, msg1, key2, msg2 }```;
    a `key` in the `message_queue` is the empty string `""` even if the original key was `nil`.
    `index` is the length of the `message_queue`; do not use `#message_queue`.
    When `retryable` is `true`, the kafka server has definitely not committed these messages, so you can safely retry sending them;
    otherwise the messages may or may not have been committed, and it is recommended to log them somewhere.

Compression is not supported yet.

#### send

`syntax: ok, err = p:send(topic, key, message)`

1. In the sync model

    In case of success, returns the offset (**cdata: LL**) of the current broker and partition.
    In case of errors, returns `nil` with a string describing the error.

2. In the async model

    The `message` is first written to the buffer. The buffer is sent to the kafka server
    when it exceeds `batch_num` messages, and is also flushed every `flush_time` milliseconds.

    In case of success, returns `true`.
    In case of errors, returns `nil` with a string describing the error (`buffer overflow`).

#### offset

`syntax: sum, details = bp:offset()`

Returns the sum of all topic-partition offsets (as returned by the ProduceRequest API),
and the details of each topic-partition.

#### flush

`syntax: ok = bp:flush()`

Always returns `true`.

Installation
============

You need to configure the lua_package_path directive to add the path of
your lua-resty-kafka source tree to ngx_lua's LUA_PATH search path, as in

```nginx
    # nginx.conf
    http {
        lua_package_path "/path/to/lua-resty-kafka/lib/?.lua;;";
        ...
    }
```

Ensure that the system account running your Nginx *worker* processes has
enough permission to read the `.lua` files.

TODO
====

1. Fetch API
2. Offset API
3. Offset Commit/Fetch API

Author
======

Dejiang Zhu (doujiang24).

Copyright and License
=====================

This module is licensed under the BSD license.

Copyright (C) 2014, by Dejiang Zhu (doujiang24).

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. See Also ======== * the ngx_lua module: http://wiki.nginx.org/HttpLuaModule * the kafka protocol: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol * the [lua-resty-redis](https://github.com/openresty/lua-resty-redis) library * the [lua-resty-logger-socket](https://github.com/cloudflare/lua-resty-logger-socket) library * the [sarama](https://github.com/Shopify/sarama) lua-resty-kafka-0.05/lib/000077500000000000000000000000001256331472200152115ustar00rootroot00000000000000lua-resty-kafka-0.05/lib/resty/000077500000000000000000000000001256331472200163575ustar00rootroot00000000000000lua-resty-kafka-0.05/lib/resty/kafka/000077500000000000000000000000001256331472200174345ustar00rootroot00000000000000lua-resty-kafka-0.05/lib/resty/kafka/broker.lua000066400000000000000000000025541256331472200214310ustar00rootroot00000000000000-- Copyright (C) Dejiang Zhu(doujiang24) local response = require "resty.kafka.response" local to_int32 = response.to_int32 local setmetatable = setmetatable local tcp = ngx.socket.tcp local _M = { _VERSION = "0.01" } local mt = { __index = _M } function _M.new(self, host, port, socket_config) return setmetatable({ host = host, port = port, config = socket_config, }, mt) end function _M.send_receive(self, request) local sock, err = tcp() if not sock then return nil, err, true end sock:settimeout(self.config.socket_timeout) local ok, err = sock:connect(self.host, self.port) if not ok then return nil, err, true end local bytes, err = sock:send(request:package()) if not bytes then return nil, err, true end local data, err = sock:receive(4) if not data then if err == "timeout" then sock:close() return nil, err end return nil, err, true end local len = to_int32(data) local data, err = sock:receive(len) if not data then if err == "timeout" then sock:close() return nil, err end return nil, err, true end sock:setkeepalive(self.config.keepalive_timeout, self.config.keepalive_size) return response:new(data), nil, true end return _M lua-resty-kafka-0.05/lib/resty/kafka/client.lua000066400000000000000000000126431256331472200214230ustar00rootroot00000000000000-- Copyright (C) Dejiang Zhu(doujiang24) local broker = require "resty.kafka.broker" local request = require "resty.kafka.request" local setmetatable = setmetatable local timer_at = ngx.timer.at local ngx_log = ngx.log local ERR = ngx.ERR local INFO = ngx.INFO local DEBUG = ngx.DEBUG local debug = ngx.config.debug local pid = ngx.worker.pid local time = ngx.time local sleep = ngx.sleep local ceil = math.ceil local pairs = pairs local ok, new_tab = pcall(require, "table.new") if not ok then new_tab = function (narr, nrec) return {} end end local _M = { _VERSION = "0.01" } local mt = { __index = _M } local function _metadata_cache(self, topic) if not topic then 
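        -- no topic given: return all cached brokers and the whole topic_partitions table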
        return self.brokers, self.topic_partitions
    end

    local partitions = self.topic_partitions[topic]
    if partitions and partitions.num and partitions.num > 0 then
        return self.brokers, partitions
    end

    return nil, "not found topic"
end


local function metadata_encode(client_id, topics, num)
    local id = 0    -- hard code correlation_id
    local req = request:new(request.MetadataRequest, id, client_id)

    req:int32(num)

    for i = 1, num do
        req:string(topics[i])
    end

    return req
end


local function metadata_decode(resp)
    local bk_num = resp:int32()
    local brokers = new_tab(0, bk_num)

    for i = 1, bk_num do
        local nodeid = resp:int32()
        brokers[nodeid] = {
            host = resp:string(),
            port = resp:int32(),
        }
    end

    local topic_num = resp:int32()
    local topics = new_tab(0, topic_num)

    for i = 1, topic_num do
        local tp_errcode = resp:int16()
        local topic = resp:string()

        local partition_num = resp:int32()
        local topic_info = new_tab(partition_num - 1, 3)

        topic_info.errcode = tp_errcode
        topic_info.num = partition_num

        for j = 1, partition_num do
            local partition_info = new_tab(0, 5)

            partition_info.errcode = resp:int16()
            partition_info.id = resp:int32()
            partition_info.leader = resp:int32()

            local repl_num = resp:int32()
            local replicas = new_tab(repl_num, 0)
            for m = 1, repl_num do
                replicas[m] = resp:int32()
            end
            partition_info.replicas = replicas

            local isr_num = resp:int32()
            local isr = new_tab(isr_num, 0)
            for m = 1, isr_num do
                isr[m] = resp:int32()
            end
            partition_info.isr = isr

            topic_info[partition_info.id] = partition_info
        end

        topics[topic] = topic_info
    end

    return brokers, topics
end


local function _fetch_metadata(self, new_topic)
    local topics, num = {}, 0

    for tp, _p in pairs(self.topic_partitions) do
        num = num + 1
        topics[num] = tp
    end

    if new_topic and not self.topic_partitions[new_topic] then
        num = num + 1
        topics[num] = new_topic
    end

    if num == 0 then
        return nil, "no topic"
    end

    local broker_list = self.broker_list
    local sc = self.socket_config
    local req = metadata_encode(self.client_id, topics, num)

    for i = 1, #broker_list do
        local host, port = broker_list[i].host, broker_list[i].port
        local bk = broker:new(host, port, sc)

        local resp, err = bk:send_receive(req)
        if not resp then
            ngx_log(INFO, "broker fetch metadata failed, err: ", err, ", host: ", host, ", port: ", port)
        else
            local brokers, topic_partitions = metadata_decode(resp)
            self.brokers, self.topic_partitions = brokers, topic_partitions

            return brokers, topic_partitions
        end
    end

    ngx_log(ERR, "all brokers failed in fetch topic metadata")
    return nil, "all brokers failed in fetch topic metadata"
end
_M.refresh = _fetch_metadata


local function meta_refresh(premature, self, interval)
    if premature then
        return
    end

    _fetch_metadata(self)

    local ok, err = timer_at(interval, meta_refresh, self, interval)
    if not ok then
        ngx_log(ERR, "failed to create timer at meta_refresh, err: ", err)
    end
end


function _M.new(self, broker_list, client_config)
    local opts = client_config or {}
    local socket_config = {
        socket_timeout = opts.socket_timeout or 3000,
        keepalive_timeout = opts.keepalive_timeout or 600 * 1000,    -- 10 min
        keepalive_size = opts.keepalive_size or 2,
    }

    local cli = setmetatable({
        broker_list = broker_list,
        topic_partitions = {},
        brokers = {},
        client_id = "worker:" ..
pid(), socket_config = socket_config, }, mt) if opts.refresh_interval then meta_refresh(nil, cli, opts.refresh_interval / 1000) -- in ms end return cli end function _M.fetch_metadata(self, topic) local brokers, partitions = _metadata_cache(self, topic) if brokers then return brokers, partitions end _fetch_metadata(self, topic) return _metadata_cache(self, topic) end function _M.choose_broker(self, topic, partition_id) local brokers, partitions = self:fetch_metadata(topic) if not brokers then return nil, partitions end local partition = partitions[partition_id] if not partition then return nil, "not found partition" end local config = brokers[partition.leader] if not config then return nil, "not found broker" end return config end return _M lua-resty-kafka-0.05/lib/resty/kafka/errors.lua000066400000000000000000000013341256331472200214540ustar00rootroot00000000000000-- Copyright (C) Dejiang Zhu(doujiang24) local _M = { [0] = 'NoError', [-1] = 'Unknown', [1] = 'OffsetOutOfRange', [2] = 'InvalidMessage', [3] = 'UnknownTopicOrPartition', [4] = 'InvalidMessageSize', [5] = 'LeaderNotAvailable', [6] = 'NotLeaderForPartition', [7] = 'RequestTimedOut', [8] = 'BrokerNotAvailable', [9] = 'ReplicaNotAvailable', [10] = 'MessageSizeTooLarge', [11] = 'StaleControllerEpochCode', [12] = 'OffsetMetadataTooLargeCode', [14] = 'OffsetsLoadInProgressCode', [15] = 'ConsumerCoordinatorNotAvailableCode', [16] = 'NotCoordinatorForConsumerCode', } _M._VERSION = "0.01" return _M lua-resty-kafka-0.05/lib/resty/kafka/producer.lua000066400000000000000000000241631256331472200217700ustar00rootroot00000000000000-- Copyright (C) Dejiang Zhu(doujiang24) local response = require "resty.kafka.response" local request = require "resty.kafka.request" local broker = require "resty.kafka.broker" local client = require "resty.kafka.client" local Errors = require "resty.kafka.errors" local sendbuffer = require "resty.kafka.sendbuffer" local ringbuffer = require "resty.kafka.ringbuffer" local setmetatable = setmetatable local timer_at = ngx.timer.at local is_exiting = ngx.worker.exiting local ngx_sleep = ngx.sleep local ngx_log = ngx.log local ERR = ngx.ERR local INFO = ngx.INFO local DEBUG = ngx.DEBUG local debug = ngx.config.debug local crc32 = ngx.crc32_short local pcall = pcall local pairs = pairs local ok, new_tab = pcall(require, "table.new") if not ok then new_tab = function (narr, nrec) return {} end end local _M = { _VERSION = "0.01" } local mt = { __index = _M } local cluster_inited local function default_partitioner(key, num, correlation_id) local id = key and crc32(key) or correlation_id -- partition_id is continuous and start from 0 return id % num end local function correlation_id(self) local id = (self.correlation_id + 1) % 1073741824 -- 2^30 self.correlation_id = id return id end local function produce_encode(self, topic_partitions) local req = request:new(request.ProduceRequest, correlation_id(self), self.client.client_id) req:int16(self.required_acks) req:int32(self.request_timeout) req:int32(topic_partitions.topic_num) for topic, partitions in pairs(topic_partitions.topics) do req:string(topic) req:int32(partitions.partition_num) for partition_id, buffer in pairs(partitions.partitions) do req:int32(partition_id) -- MessageSetSize and MessageSet req:message_set(buffer.queue, buffer.index) end end return req end local function produce_decode(resp) local topic_num = resp:int32() local ret = new_tab(0, topic_num) for i = 1, topic_num do local topic = resp:string() local partition_num = resp:int32() ret[topic] = {} for j 
= 1, partition_num do
            local partition = resp:int32()

            ret[topic][partition] = {
                errcode = resp:int16(),
                offset = resp:int64(),
            }
        end
    end

    return ret
end


local function choose_partition(self, topic, key)
    local brokers, partitions = self.client:fetch_metadata(topic)
    if not brokers then
        return nil, partitions
    end

    return self.partitioner(key, partitions.num, self.correlation_id)
end


local function _flush_lock(self)
    if not self.flushing then
        if debug then
            ngx_log(DEBUG, "flush lock acquired")
        end

        self.flushing = true
        return true
    end

    return false
end


local function _flush_unlock(self)
    if debug then
        ngx_log(DEBUG, "flush lock released")
    end

    self.flushing = false
end


local function _send(self, broker_conf, topic_partitions)
    local sendbuffer = self.sendbuffer
    local resp, retryable = nil, true

    local bk, err = broker:new(broker_conf.host, broker_conf.port, self.socket_config)
    if bk then
        local req = produce_encode(self, topic_partitions)

        resp, err, retryable = bk:send_receive(req)
        if resp then
            local result = produce_decode(resp)

            for topic, partitions in pairs(result) do
                for partition_id, r in pairs(partitions) do
                    local errcode = r.errcode

                    if errcode == 0 then
                        sendbuffer:offset(topic, partition_id, r.offset)
                        sendbuffer:clear(topic, partition_id)
                    else
                        err = Errors[errcode]

                        -- XX: only errcodes 3, 5 and 6 are retryable
                        local retryable0 = retryable
                        if errcode ~= 3 and errcode ~= 5 and errcode ~= 6 then
                            retryable0 = false
                        end

                        local index = sendbuffer:err(topic, partition_id, err, retryable0)

                        ngx_log(INFO, "failed to send messages to kafka, err: ", err,
                                ", retryable: ", retryable0, ", topic: ", topic,
                                ", partition_id: ", partition_id, ", length: ", index / 2)
                    end
                end
            end

            return
        end
    end

    -- when broker:new failed or send_receive failed
    for topic, partitions in pairs(topic_partitions.topics) do
        for partition_id, partition in pairs(partitions.partitions) do
            sendbuffer:err(topic, partition_id, err, retryable)
        end
    end
end


local function _batch_send(self, sendbuffer)
    local try_num = 1
    while try_num <= self.max_retry do
        -- aggregate the buffered messages by leader broker
        local send_num, sendbroker = sendbuffer:aggregator(self.client)
        if send_num == 0 then
            break
        end

        for i = 1, send_num, 2 do
            local broker_conf, topic_partitions = sendbroker[i], sendbroker[i + 1]

            _send(self, broker_conf, topic_partitions)
        end

        if sendbuffer:done() then
            return true
        end

        self.client:refresh()

        try_num = try_num + 1
        if try_num < self.max_retry then
            ngx_sleep(self.retry_backoff / 1000)    -- ms to s
        end
    end
end


local _flush_buffer

local function _flush(premature, self)
    if not _flush_lock(self) then
        if debug then
            ngx_log(DEBUG, "previous flush not finished")
        end
        return
    end

    local ringbuffer = self.ringbuffer
    local sendbuffer = self.sendbuffer

    while true do
        local topic, key, msg = ringbuffer:pop()
        if not topic then
            break
        end

        local partition_id, err = choose_partition(self, topic, key)
        if not partition_id then
            partition_id = -1
        end

        local overflow = sendbuffer:add(topic, partition_id, key, msg)
        if overflow then    -- reached batch_size in one topic-partition
            break
        end
    end

    local all_done = _batch_send(self, sendbuffer)

    if not all_done then
        for topic, partition_id, buffer in sendbuffer:loop() do
            local queue, index, err, retryable = buffer.queue, buffer.index, buffer.err, buffer.retryable

            if self.error_handle then
                local ok, err = pcall(self.error_handle, topic, partition_id, queue, index, err, retryable)
                if not ok then
                    ngx_log(ERR, "failed to callback error_handle: ", err)
                end
            else
                ngx_log(ERR, "failed to send buffered messages to kafka, err: ", err,
                        ", retryable: ", retryable, ", topic: ", topic, ", partition_id: ",
partition_id, ", length: ", index / 2)
            end

            sendbuffer:clear(topic, partition_id)
        end
    end

    _flush_unlock(self)

    if is_exiting() and self.ringbuffer:left_num() > 0 then
        -- a 0-delay timer can still be created even when the worker is exiting
        _flush_buffer(self)
    end

    return true
end


_flush_buffer = function (self)
    local ok, err = timer_at(0, _flush, self)
    if not ok then
        ngx_log(ERR, "failed to create timer at _flush_buffer, err: ", err)
    end
end


local _timer_flush
_timer_flush = function (premature, self, time)
    _flush_buffer(self)

    if premature then
        return
    end

    local ok, err = timer_at(time, _timer_flush, self, time)
    if not ok then
        ngx_log(ERR, "failed to create timer at _timer_flush, err: ", err)
    end
end


function _M.new(self, broker_list, producer_config)
    local opts = producer_config or {}

    local async = opts.producer_type == "async"
    if async and cluster_inited then
        return cluster_inited
    end

    local cli = client:new(broker_list, producer_config)

    local p = setmetatable({
        client = cli,
        correlation_id = 1,
        request_timeout = opts.request_timeout or 2000,
        retry_backoff = opts.retry_backoff or 100,    -- ms
        max_retry = opts.max_retry or 3,
        required_acks = opts.required_acks or 1,
        partitioner = opts.partitioner or default_partitioner,
        error_handle = opts.error_handle,
        async = async,
        socket_config = cli.socket_config,
        ringbuffer = ringbuffer:new(opts.batch_num or 200, opts.max_buffering or 50000),    -- defaults: 200, 50K
        sendbuffer = sendbuffer:new(opts.batch_num or 200, opts.batch_size or 1048576),
            -- defaults: 200 messages per batch, 1MB batch size
            -- batch_size should be less than (socket.request.max.bytes / 2 - 10KiB)
            -- as configured in the kafka server (default 100MB)
    }, mt)

    if async then
        cluster_inited = p
        _timer_flush(nil, p, (opts.flush_time or 1000) / 1000)    -- default 1s
    end
    return p
end


-- offset is cdata (LL in luajit)
function _M.send(self, topic, key, message)
    if self.async then
        local ok, err, batch = self.ringbuffer:add(topic, key, message)
        if not ok then
            return nil, err
        end

        if batch or is_exiting() then
            _flush_buffer(self)
        end

        return true
    end

    local partition_id, err = choose_partition(self, topic, key)
    if not partition_id then
        return nil, err
    end

    local sendbuffer = self.sendbuffer
    sendbuffer:add(topic, partition_id, key, message)

    local ok = _batch_send(self, sendbuffer)
    if not ok then
        sendbuffer:clear(topic, partition_id)
        return nil, sendbuffer:err(topic, partition_id)
    end

    return sendbuffer:offset(topic, partition_id)
end


function _M.flush(self)
    return _flush(nil, self)
end


-- offset is cdata (LL in luajit)
function _M.offset(self)
    local topics = self.sendbuffer.topics
    local sum, details = 0, {}

    for topic, partitions in pairs(topics) do
        details[topic] = {}
        for partition_id, buffer in pairs(partitions) do
            sum = sum + buffer.offset
            details[topic][partition_id] = buffer.offset
        end
    end

    return sum, details
end


return _M

lua-resty-kafka-0.05/lib/resty/kafka/request.lua
-- Copyright (C) Dejiang Zhu(doujiang24)


local bit = require "bit"


local setmetatable = setmetatable
local concat = table.concat
local rshift = bit.rshift
local band = bit.band
local char = string.char
local crc32 = ngx.crc32_long
local tonumber = tonumber


local _M = { _VERSION = "0.01" }
local mt = { __index = _M }


local API_VERSION = 0

_M.ProduceRequest = 0
_M.FetchRequest = 1
_M.OffsetRequest = 2
_M.MetadataRequest = 3
_M.OffsetCommitRequest = 8
_M.OffsetFetchRequest = 9
_M.ConsumerMetadataRequest = 10


local function str_int8(int)
    return char(band(int, 0xff))
end


local function str_int16(int)
    return char(band(rshift(int, 8), 0xff),
                band(int, 0xff))
end
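
-- These str_int* helpers pack Lua numbers into big-endian byte strings, as
-- the Kafka wire protocol requires. A couple of hand-checked examples
-- (a sketch for illustration, not part of the original source):
--   str_int16(0x0102) --> "\1\2"
--   str_int32(1)      --> "\0\0\0\1"
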
local function str_int32(int) -- ngx.say(debug.traceback()) return char(band(rshift(int, 24), 0xff), band(rshift(int, 16), 0xff), band(rshift(int, 8), 0xff), band(int, 0xff)) end -- XX int can be cdata: LL or lua number local function str_int64(int) return char(tonumber(band(rshift(int, 56), 0xff)), tonumber(band(rshift(int, 48), 0xff)), tonumber(band(rshift(int, 40), 0xff)), tonumber(band(rshift(int, 32), 0xff)), tonumber(band(rshift(int, 24), 0xff)), tonumber(band(rshift(int, 16), 0xff)), tonumber(band(rshift(int, 8), 0xff)), tonumber(band(int, 0xff))) end function _M.new(self, apikey, correlation_id, client_id) local c_len = #client_id local req = { 0, -- request size: int32 str_int16(apikey), str_int16(API_VERSION), str_int32(correlation_id), str_int16(c_len), client_id, } return setmetatable({ _req = req, offset = 7, len = c_len + 10, }, mt) end function _M.int16(self, int) local req = self._req local offset = self.offset req[offset] = str_int16(int) self.offset = offset + 1 self.len = self.len + 2 end function _M.int32(self, int) local req = self._req local offset = self.offset req[offset] = str_int32(int) self.offset = offset + 1 self.len = self.len + 4 end function _M.int64(self, int) local req = self._req local offset = self.offset req[offset] = str_int64(int) self.offset = offset + 1 self.len = self.len + 8 end function _M.string(self, str) local req = self._req local offset = self.offset local str_len = #str req[offset] = str_int16(str_len) req[offset + 1] = str self.offset = offset + 2 self.len = self.len + 2 + str_len end function _M.bytes(self, str) local req = self._req local offset = self.offset local str_len = #str req[offset] = str_int32(str_len) req[offset + 1] = str self.offset = offset + 2 self.len = self.len + 4 + str_len end local function message_package(key, msg) local key = key or "" local key_len = #key local len = #msg local req = { -- MagicByte str_int8(0), -- XX hard code no Compression str_int8(0), str_int32(key_len), key, str_int32(len), msg, } local str = concat(req) return crc32(str), str, key_len + len + 14 end function _M.message_set(self, messages, index) local req = self._req local off = self.offset local msg_set_size = 0 local index = index or #messages for i = 1, index, 2 do local crc32, str, msg_len = message_package(messages[i], messages[i + 1]) req[off + 1] = str_int64(0) -- offset req[off + 2] = str_int32(msg_len) -- include the crc32 length req[off + 3] = str_int32(crc32) req[off + 4] = str off = off + 4 msg_set_size = msg_set_size + msg_len + 12 end req[self.offset] = str_int32(msg_set_size) -- MessageSetSize self.offset = off + 1 self.len = self.len + 4 + msg_set_size end function _M.package(self) local req = self._req req[1] = str_int32(self.len) return req end return _M lua-resty-kafka-0.05/lib/resty/kafka/response.lua000066400000000000000000000041311256331472200217740ustar00rootroot00000000000000-- Copyright (C) Dejiang Zhu(doujiang24) local bit = require "bit" local setmetatable = setmetatable local byte = string.byte local sub = string.sub local lshift = bit.lshift local bor = bit.bor local strbyte = string.byte local _M = { _VERSION = "0.01" } local mt = { __index = _M } function _M.new(self, str) local resp = setmetatable({ str = str, offset = 1, correlation_id = 0, }, mt) resp.correlation_id = resp:int32() return resp end function _M.int16(self) local str = self.str local offset = self.offset self.offset = offset + 2 local high = byte(str, offset) -- high padded return bor((high >= 128) and 0xffff0000 or 0, lshift(high, 8), byte(str, 
offset + 1))
end


local function to_int32(str, offset)
    local offset = offset or 1
    local a, b, c, d = strbyte(str, offset, offset + 3)
    return bor(lshift(a, 24), lshift(b, 16), lshift(c, 8), d)
end
_M.to_int32 = to_int32


function _M.int32(self)
    local str = self.str
    local offset = self.offset
    self.offset = offset + 4

    return to_int32(str, offset)
end


-- XX returns cdata: LL
function _M.int64(self)
    local offset = self.offset
    self.offset = offset + 8

    local a, b, c, d, e, f, g, h = strbyte(self.str, offset, offset + 7)

    --[[
    -- only 52 bit accuracy
    local hi = bor(lshift(a, 24), lshift(b, 16), lshift(c, 8), d)
    local lo = bor(lshift(f, 16), lshift(g, 8), h)

    return hi * 4294967296 + 16777216 * e + lo
    --]]

    -- note: LuaJIT bit shifts use the shift count mod 32,
    -- so lshift(a, 56) is effectively lshift(a, 24), etc.
    return 4294967296LL * bor(lshift(a, 56), lshift(b, 48), lshift(c, 40), lshift(d, 32))
            + 16777216LL * e + bor(lshift(f, 16), lshift(g, 8), h)
end


function _M.string(self)
    local len = self:int16()

    local offset = self.offset
    self.offset = offset + len

    return sub(self.str, offset, offset + len - 1)
end


function _M.bytes(self)
    local len = self:int32()

    local offset = self.offset
    self.offset = offset + len

    return sub(self.str, offset, offset + len - 1)
end


function _M.correlation_id(self)
    return self.correlation_id
end


return _M

lua-resty-kafka-0.05/lib/resty/kafka/ringbuffer.lua
-- Copyright (C) Dejiang Zhu(doujiang24)


local setmetatable = setmetatable

local ngx_null = ngx.null

local ok, new_tab = pcall(require, "table.new")
if not ok then
    new_tab = function (narr, nrec) return {} end
end


local _M = { _VERSION = "0.01" }
local mt = { __index = _M }


function _M.new(self, batch_num, max_buffering)
    local ringbuffer = {
        queue = new_tab(max_buffering * 3, 0),
        batch_num = batch_num,
        size = max_buffering * 3,

        start = 1,
        num = 0,
    }
    return setmetatable(ringbuffer, mt)
end


function _M.add(self, topic, key, message)
    local num = self.num
    local size = self.size

    if num >= size then
        return nil, "buffer overflow"
    end

    local index = (self.start + num) % size
    local queue = self.queue

    queue[index] = topic
    queue[index + 1] = key
    queue[index + 2] = message

    self.num = num + 3

    return true, nil, (self.num / 3 >= self.batch_num)
end


function _M.pop(self)
    local num = self.num
    if num <= 0 then
        return nil, "empty buffer"
    end

    self.num = num - 3

    local start = self.start
    local queue = self.queue

    self.start = (start + 3) % self.size

    local topic, key, message = queue[start], queue[start + 1], queue[start + 2]
    queue[start], queue[start + 1], queue[start + 2] = ngx_null, ngx_null, ngx_null

    return topic, key, message
end


function _M.left_num(self)
    return self.num / 3
end


return _M

lua-resty-kafka-0.05/lib/resty/kafka/sendbuffer.lua
-- Copyright (C) Dejiang Zhu(doujiang24)


local setmetatable = setmetatable
local pairs = pairs
local next = next

local ok, new_tab = pcall(require, "table.new")
if not ok then
    new_tab = function (narr, nrec) return {} end
end


local MAX_REUSE = 10000

local _M = { _VERSION = "0.01" }
local mt = { __index = _M }


function _M.new(self, batch_num, batch_size)
    local sendbuffer = {
        topics = {},
        queue_num = 0,
        batch_num = batch_num * 2,
        batch_size = batch_size,
    }
    return setmetatable(sendbuffer, mt)
end


function _M.add(self, topic, partition_id, key, msg)
    local topics = self.topics

    if not topics[topic] then
        topics[topic] = {}
    end

    if not topics[topic][partition_id] then
        topics[topic][partition_id] = {
            queue = new_tab(self.batch_num, 0),
            index = 0,
            used = 0,
            size = 0,
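            -- the fields below track delivery state: the last committed offset,
            -- whether the last error is retryable, and the last error message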
offset = 0, retryable = true, err = "", } end local buffer = topics[topic][partition_id] local index = buffer.index local queue = buffer.queue if index == 0 then self.queue_num = self.queue_num + 1 buffer.retryable = true end queue[index + 1] = key queue[index + 2] = msg buffer.index = index + 2 buffer.size = buffer.size + #msg + (key and #key or 0) if (buffer.size >= self.batch_size) or (buffer.index >= self.batch_num) then return true end end function _M.offset(self, topic, partition_id, offset) local buffer = self.topics[topic][partition_id] if not offset then return buffer.offset end buffer.offset = offset + (buffer.index / 2) end function _M.clear(self, topic, partition_id) local buffer = self.topics[topic][partition_id] buffer.index = 0 buffer.size = 0 buffer.used = buffer.used + 1 if buffer.used >= MAX_REUSE then buffer.queue = new_tab(self.batch_num, 0) buffer.used = 0 end self.queue_num = self.queue_num - 1 end function _M.done(self) return self.queue_num == 0 end function _M.err(self, topic, partition_id, err, retryable) local buffer = self.topics[topic][partition_id] if err then buffer.err = err buffer.retryable = retryable return buffer.index else return buffer.err, buffer.retryable end end function _M.loop(self) local topics, t, p = self.topics return function () if t then for partition_id, queue in next, topics[t], p do p = partition_id if queue.index > 0 then return t, partition_id, queue end end end for topic, partitions in next, topics, t do t = topic p = nil for partition_id, queue in next, partitions, p do p = partition_id if queue.index > 0 then return topic, partition_id, queue end end end return end end function _M.aggregator(self, client) local num = 0 local sendbroker = {} local brokers = {} local i = 1 for topic, partition_id, queue in self:loop() do if queue.retryable then local broker_conf, err = client:choose_broker(topic, partition_id) if not broker_conf then self:err(topic, partition_id, err, true) else if not brokers[broker_conf] then brokers[broker_conf] = { topics = {}, topic_num = 0, size = 0, } end local broker = brokers[broker_conf] if not broker.topics[topic] then brokers[broker_conf].topics[topic] = { partitions = {}, partition_num = 0, } broker.topic_num = broker.topic_num + 1 end local broker_topic = broker.topics[topic] broker_topic.partitions[partition_id] = queue broker_topic.partition_num = broker_topic.partition_num + 1 broker.size = broker.size + queue.size if broker.size >= self.batch_size then sendbroker[num + 1] = broker_conf sendbroker[num + 2] = brokers[broker_conf] num = num + 2 brokers[broker_conf] = nil end end end end for broker_conf, topic_partitions in pairs(brokers) do sendbroker[num + 1] = broker_conf sendbroker[num + 2] = brokers[broker_conf] num = num + 2 end return num, sendbroker end return _M lua-resty-kafka-0.05/lua-releng000077500000000000000000000034641256331472200164330ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; sub file_contains ($$); my $version; for my $file (map glob, qw{ lib/*.lua lib/*/*.lua lib/*/*/*.lua }) { # Check the sanity of each .lua file open my $in, $file or die "ERROR: Can't open $file for reading: $!\n"; my $found_ver; while (<$in>) { my ($ver, $skipping); if (/(?x) (?:_VERSION) \s* = .*? ([\d\.]*\d+) (.*? 
SKIP)?/) { my $orig_ver = $ver = $1; $found_ver = 1; # $skipping = $2; $ver =~ s{^(\d+)\.(\d{3})(\d{3})$}{join '.', int($1), int($2), int($3)}e; warn "$file: $orig_ver ($ver)\n"; } elsif (/(?x) (?:_VERSION) \s* = \s* ([a-zA-Z_]\S*)/) { warn "$file: $1\n"; $found_ver = 1; last; } if ($ver and $version and !$skipping) { if ($version ne $ver) { # die "$file: $ver != $version\n"; } } elsif ($ver and !$version) { $version = $ver; } } if (!$found_ver) { warn "WARNING: No \"_VERSION\" or \"version\" field found in `$file`.\n"; } close $in; print "Checking use of Lua global variables in file $file ...\n"; system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require'"); #file_contains($file, "attempt to write to undeclared variable"); system("grep -H -n -E --color '.{120}' $file"); } sub file_contains ($$) { my ($file, $regex) = @_; open my $in, $file or die "Cannot open $file fo reading: $!\n"; my $content = do { local $/; <$in> }; close $in; #print "$content"; return scalar ($content =~ /$regex/); } if (-d 't') { for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) { system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file}); } } lua-resty-kafka-0.05/t/000077500000000000000000000000001256331472200147065ustar00rootroot00000000000000lua-resty-kafka-0.05/t/buffer.t000066400000000000000000000244561256331472200163570ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: force flush --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local key = "key" local message = "halo world" local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000 }) ngx.sleep(0.1) -- will have an immediately flush by timer_flush local ok, err = p:send("test", key, message) if not ok then ngx.say("send err:", err) return end ngx.say("send ok:", ok) p:flush() local offset0 = p:offset() local ok, err = p:send("test", key, message) if not ok then ngx.say("send err:", err) return end ngx.say("send ok:", ok) p:flush() local offset1 = p:offset() ngx.say("send num:", tonumber(offset1 - offset0)) '; } --- request GET /t --- response_body send ok:true send ok:true send num:1 --- no_error_log [error] === TEST 2: timer flush --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local key = "key" local message = "halo world" local p = producer:new(broker_list, { producer_type = "async", flush_time = 1000 }) ngx.sleep(0.1) -- will have an immediately flush by timer_flush local size, err = p:send("test", key, message) if not size then ngx.say("send err:", err) return end ngx.sleep(1.1) local offset = p:offset() ngx.say("offset bigger than 0: ", tonumber(offset) > 0) '; } --- request GET /t --- response_body offset bigger than 0: true 
--- no_error_log [error] === TEST 3: buffer flush --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local key = "key" local message = "halo world" local p = producer:new(broker_list, { producer_type = "async", batch_num = 1, flush_time = 10000}) ngx.sleep(0.1) -- will have an immediately flush by timer_flush local ok, err = p:send("test", nil, message) if not ok then ngx.say("send err:", err) return end ngx.say("send ok:", ok) ngx.sleep(1) local offset0 = p:offset() local send_num = p:flush() local offset1 = p:offset() ngx.say("send num:", tonumber(offset1 - offset0)) '; } --- request GET /t --- response_body send ok:true send num:0 --- no_error_log [error] === TEST 4: error handle --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, } local key = "key" local message = "halo world" local error_handle = function (topic, partition_id, queue, index, err, retryable) ngx.log(ngx.ERR, "failed to send to kafka, topic: ", topic, "; partition_id: ", partition_id, "; retryable: ", retryable) end local p = producer:new(broker_list, { producer_type = "async", max_retry = 1, batch_num = 1, error_handle = error_handle }) local ok, err = p:send("test", key, message) if not ok then ngx.say("send err:", err) return end ngx.say("send ok:", ok) p:flush() '; } --- request GET /t --- response_body send ok:true --- error_log: failed to send to kafka, topic: test; partition_id: -1; retryable: true === TEST 5: wrong in error handle --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, } local key = "key" local message = "halo world" local error_handle = function (topic, partition_id, queue, index, err, retryable) local num = topic + 1 return true end ngx.log(ngx.ERR, tostring(error_handle)) local p = producer:new(broker_list, { producer_type = "async", max_retry = 1, batch_num = 1, error_handle = error_handle }) local ok, err = p:send("test", key, message) if not ok then ngx.say("send err:", err) return end ngx.say("send ok:", ok) p:flush() '; } --- request GET /t --- response_body send ok:true --- error_log: failed to callback error_handle === TEST 6: work in log phase --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' ngx.req.read_body(); local body = ngx.req.get_body_data(); ngx.say(body); '; log_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, } local key = "key" local message = ngx.var.request_body local p = producer:new(broker_list, { producer_type = "async", batch_num = 1, flush_time = 10000}) -- 1 message local size, err = p:send("test", key, message) '; } --- request POST /t Hello world --- response_body Hello world --- no_error_log [error] === TEST 7: two topic in a batch --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' ngx.req.read_body(); local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { 
host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, } local key = "key" local message = ngx.req.get_body_data(); local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000}) ngx.sleep(0.01) -- 2 message local size, err = p:send("test", key, message) local size, err = p:send("test2", key, message) p:flush() local offset0 = p:offset() local size, err = p:send("test", key, message) local size, err = p:send("test2", key, message) p:flush() local offset1 = p:offset() ngx.say("send num:", tonumber(offset1 - offset0)) '; } --- request POST /t Hello world --- response_body send num:2 --- no_error_log [error] === TEST 8: unretryable --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' ngx.req.read_body(); local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, } local key = "key" local message = ngx.req.get_body_data(); local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000}) ngx.sleep(0.01) local size, err = p:send("test", key, message) p:flush() local offset0 = p:offset() -- XX: just hack for testing p.sendbuffer.topics.test[1].retryable = false local size, err = p:send("test", key, message) p:flush() local offset1 = p:offset() ngx.say("send num:", tonumber(offset1 - offset0)) '; } --- request POST /t Hello world --- response_body send num:1 --- no_error_log [error] === TEST 9: two send in a batch --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' ngx.req.read_body(); local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, } local key = "key" local message = ngx.req.get_body_data(); local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000}) ngx.sleep(0.01) -- 2 message local size, err = p:send("test", key, message) p:flush() local offset0 = p:offset() local size, err = p:send("test", key, message) local size, err = p:send("test", key, message) p:flush() local offset1 = p:offset() ngx.say("send num:", tonumber(offset1 - offset0)) '; } --- request POST /t Hello world --- response_body send num:2 --- no_error_log [error] lua-resty-kafka-0.05/t/client.t000066400000000000000000000041221256331472200163500ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: simple fetch --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local client = require "resty.kafka.client" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local messages = { "halo world", } local cli = client:new(broker_list) local brokers, partitions = cli:fetch_metadata("test") if not brokers then ngx.say("fetch err:", partitions) return end ngx.say(cjson.encode(partitions)) '; } --- request GET /t --- response_body_like .*replicas.* --- no_error_log [error] === TEST 2: timer refresh --- 
http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local client = require "resty.kafka.client" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local messages = { "halo world", } local cli = client:new(broker_list, { refresh_interval = 100 }) -- XXX just hack for test cli.topic_partitions = { test = {}, test1 = {} } ngx.sleep(0.5) ngx.say(cjson.encode(cli.topic_partitions)) '; } --- request GET /t --- response_body_like .*replicas.* --- no_error_log [error] lua-resty-kafka-0.05/t/producer.t000066400000000000000000000122601256331472200167170ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: simple send --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local message = "halo world" local p = producer:new(broker_list) local offset, err = p:send("test", nil, message) if not offset then ngx.say("send err:", err) return end ngx.say("offset: ", tostring(offset)) '; } --- request GET /t --- response_body_like .*offset.* --- no_error_log [error] === TEST 2: broker list has bad one --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local message = "halo world" local p, err = producer:new(broker_list) local offset, err = p:send("test", nil, message) if not offset then ngx.say("send err:", err) return end ngx.say("offset: ", tostring(offset)) '; } --- request GET /t --- response_body_like .*offset.* --- error_log: fetch_metadata === TEST 3: two send --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local key = "key" local message = "halo world" local p = producer:new(broker_list) local offset1, err = p:send("test", key, message) if not offset1 then ngx.say("send1 err:", err) return end local offset2, err = p:send("test", key, message) if not offset2 then ngx.say("send2 err:", err) return end ngx.say("offset diff: ", tonumber(offset2 - offset1)) '; } --- request GET /t --- response_body offset diff: 1 --- no_error_log [error] === TEST 4: two topic send --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local key = "key" local message = "halo world" local p = producer:new(broker_list) local offset1, err = p:send("test", key, message) if not offset1 then 
ngx.say("send1 err:", err) return end local offset2, err = p:send("test2", key, message) if not offset2 then ngx.say("send2 err:", err) return end ngx.say("two topic successed!") '; } --- request GET /t --- response_body two topic successed! --- no_error_log [error] === TEST 5: kafka return error --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local cjson = require "cjson" local producer = require "resty.kafka.producer" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local message = "halo world" local p, err = producer:new(broker_list) local offset, err = p:send("test", "a", message) if not offset then ngx.say("send err:", err) return end -- XX: just hack for testing p.client.topic_partitions.test = { [2] = { id = 2, leader = 0 }, [1] = { id = 1, leader = 0 }, [0] = { id = 0, leader = 0 }, num = 3 } local offset2, err = p:send("test", "b", message) if not offset2 then ngx.say("send err:", err) return end ngx.say("offset: ", tostring(offset2 - offset)) '; } --- request GET /t --- response_body send err:not found partition --- no_error_log [error] lua-resty-kafka-0.05/t/request.t000066400000000000000000000055301256331472200165660ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: simple pack --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local request = require "resty.kafka.request" local req = request:new(request.ProduceRequest, 1, "clientid") local function printx() local str = req._req[#req._req] for i = 1, #str do ngx.print(bit.tohex(string.byte(str, i), 2)) end ngx.say("") end req:int16(-1 * math.pow(2, 15)); printx() req:int16(math.pow(2, 15) - 1); printx() req:int16(-1); printx() req:int32(-1 * math.pow(2, 31)); printx() req:int32(math.pow(2, 31) - 1); printx() req:int64(-1LL * math.pow(2, 32) * math.pow(2, 31)); printx() req:int64(1ULL * math.pow(2, 32) * math.pow(2, 31) - 1); printx() '; } --- request GET /t --- response_body 8000 7fff ffff 80000000 7fffffff 8000000000000000 7fffffffffffffff --- no_error_log [error] === TEST 2: response unpack --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local request = require "resty.kafka.request" local response = require "resty.kafka.response" local function compare(func, number) local req = request:new(request.ProduceRequest, 1, "clientid") req:int32(100) local correlation_id = req._req[#req._req] req[func](req, number) local str = correlation_id .. 
req._req[#req._req] local resp = response:new(str) local cnumber = resp[func](resp) ngx.say(func, ": ", tostring(number), ", ", number == cnumber) end compare("int16", 0x7fff) compare("int16", 0x7fff * -1 - 1) compare("int32", 0x7fffffff) compare("int32", 0x7fffffff * -1 - 1) compare("int64", 1ULL * math.pow(2, 32) * math.pow(2, 31) - 1) compare("int64", -1LL * math.pow(2, 32) * math.pow(2, 31)) '; } --- request GET /t --- response_body int16: 32767, true int16: -32768, true int32: 2147483647, true int32: -2147483648, true int64: 9223372036854775807ULL, true int64: -9223372036854775808LL, true --- no_error_log [error] lua-resty-kafka-0.05/t/ringbuffer.t000066400000000000000000000057311256331472200172320ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: add --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local ringbuffer = require "resty.kafka.ringbuffer" local buffer = ringbuffer:new(2, 3) local topic = "test" local key = "key" local message = "halo world" local ok, err, batch = buffer:add(topic, key, message) ngx.say("add ok:", ok, "; batch:", batch) local ok, err, batch = buffer:add(topic, key, message) ngx.say("add ok:", ok, "; batch:", batch) local ok, err, batch = buffer:add(topic, key, message) local ok, err, batch = buffer:add(topic, key, message) if not ok then ngx.say("add err:", err) return end ngx.say("add ok:", ok, "; batch:", batch) '; } --- request GET /t --- response_body add ok:true; batch:false add ok:true; batch:true add err:buffer overflow --- no_error_log [error] === TEST 2: pop --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local ringbuffer = require "resty.kafka.ringbuffer" local buffer = ringbuffer:new(2, 3) for i = 1, 2 do buffer:add("topic1", "key1", "message1") buffer:add("topic2", "key2", "message2") local topic, key, message = buffer:pop() ngx.say(topic, key, message) local topic, key, message = buffer:pop() ngx.say(topic, key, message) end local topic, key, message = buffer:pop() ngx.say(topic) '; } --- request GET /t --- response_body topic1key1message1 topic2key2message2 topic1key1message1 topic2key2message2 nil --- no_error_log [error] === TEST 3: left_num --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local ringbuffer = require "resty.kafka.ringbuffer" local buffer = ringbuffer:new(2, 3) buffer:add("topic1", "key1", "message1") buffer:add("topic2", "key2", "message2") buffer:add("topic2", "key2", "message2") local topic, key, message = buffer:pop() buffer:add("topic2", "key2", "message2") local num = buffer:left_num() ngx.say("num:", num) '; } --- request GET /t --- response_body num:3 --- no_error_log [error] lua-resty-kafka-0.05/t/sendbuffer.t000066400000000000000000000134571256331472200172300ustar00rootroot00000000000000# vim:set ts=4 sw=4 et: use Test::Nginx::Socket::Lua; use Cwd qw(cwd); repeat_each(2); plan tests => repeat_each() * (3 * blocks()); my $pwd = cwd(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; lua_package_cpath 
"/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; }; $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; no_long_string(); #no_diff(); run_tests(); __DATA__ === TEST 1: add --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local sendbuffer = require "resty.kafka.sendbuffer" local buffer = sendbuffer:new(2, 20) local topic = "test" local partition_id = 1 local key = "key" local message = "halo world" local overflow = buffer:add(topic, partition_id, key, message) ngx.say("overflow:", overflow) local overflow = buffer:add(topic, partition_id, key, message) ngx.say("overflow:", overflow) '; } --- request GET /t --- response_body overflow:nil overflow:true --- no_error_log [error] === TEST 2: offset --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local sendbuffer = require "resty.kafka.sendbuffer" local buffer = sendbuffer:new(2, 20) local topic = "test" local partition_id = 1 local key = "key" local message = "halo world" local overflow = buffer:add(topic, partition_id, key, message) ngx.say("overflow:", overflow) local offset = buffer:offset(topic, partition_id) ngx.say("offset:", offset) local offset = buffer:offset(topic, partition_id, 100) local offset = buffer:offset(topic, partition_id) ngx.say("offset:", offset) '; } --- request GET /t --- response_body overflow:nil offset:0 offset:101 --- no_error_log [error] === TEST 3: clear --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local sendbuffer = require "resty.kafka.sendbuffer" local buffer = sendbuffer:new(2, 20) local topic = "test" local partition_id = 1 local key = "key" local message = "halo world" local overflow = buffer:add(topic, partition_id, key, message) ngx.say("overflow:", overflow) ngx.say("used:", buffer.topics[topic][partition_id].used) ngx.say("queue_num:", buffer.queue_num) buffer:clear(topic, partition_id) ngx.say("done:", buffer:done()) ngx.say("queue_num:", buffer.queue_num) for i = 1, 10000 do buffer:clear(topic, partition_id) end ngx.say("used:", buffer.topics[topic][partition_id].used) '; } --- request GET /t --- response_body overflow:nil used:0 queue_num:1 done:true queue_num:0 used:1 --- no_error_log [error] === TEST 4: loop --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local sendbuffer = require "resty.kafka.sendbuffer" local buffer = sendbuffer:new(2, 20) local topic = "test" local partition_id = 1 local key = "key" local message = "halo world" local overflow = buffer:add(topic, partition_id, key, message) local overflow = buffer:add("test2", partition_id, key, message) for t, p in buffer:loop() do ngx.say("topic:", t, "; partition_id:", p) end '; } --- request GET /t --- response_body topic:test; partition_id:1 topic:test2; partition_id:1 --- no_error_log [error] === TEST 5: aggregator --- http_config eval: $::HttpConfig --- config location /t { content_by_lua ' local sendbuffer = require "resty.kafka.sendbuffer" local client = require "resty.kafka.client" local broker_list = { { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, } local cli = client:new(broker_list) local buffer = sendbuffer:new(2, 20) local topic = "test" local partition_id = 1 local key = "key" local message = "halo world" cli:fetch_metadata(topic) cli:fetch_metadata("test2") cli:fetch_metadata("test3") cli:fetch_metadata("test4") cli:fetch_metadata("test5") local overflow 
= buffer:add(topic, partition_id, key, message) local overflow = buffer:add("test2", partition_id, key, message) local overflow = buffer:add("test3", partition_id, key, message) local overflow = buffer:add("test4", partition_id, key, message) local overflow = buffer:add("test5", partition_id, key, message) local num, sendbroker = buffer:aggregator(cli) ngx.say("num:", num/2) buffer:err("test5", partition_id, "timeout", false) buffer:err("test4", partition_id, "timeout", false) local num, sendbroker = buffer:aggregator(cli) ngx.say("num:", num/2) buffer:clear("test3", partition_id) buffer:clear("test2", partition_id) local num, sendbroker = buffer:aggregator(cli) ngx.say("num:", num/2) for t, p in buffer:loop() do ngx.say("topic:", t, "; partition_id:", p) end '; } --- request GET /t --- response_body num:3 num:2 num:1 topic:test5; partition_id:1 topic:test4; partition_id:1 topic:test; partition_id:1 --- no_error_log [error]