pax_global_header00006660000000000000000000000064130761205600014512gustar00rootroot0000000000000052 comment=0ab52045cde0c16f9f99dad41cee019ff88e974e kafkacat-1.3.1/000077500000000000000000000000001307612056000132615ustar00rootroot00000000000000kafkacat-1.3.1/.dir-locals.el000066400000000000000000000000521307612056000157070ustar00rootroot00000000000000( (c-mode . ((c-file-style . "linux"))) ) kafkacat-1.3.1/.doozer.json000066400000000000000000000006671307612056000155450ustar00rootroot00000000000000{ "targets": { "xenial-amd64": { "buildenv": "xenial-amd64", "builddeps": [ "build-essential", "python", "curl" ], "buildcmd": [ "./bootstrap.sh" ] }, "xenial-i386": { "buildenv": "xenial-i386", "builddeps": [ "build-essential", "python", "curl" ], "buildcmd": [ "./bootstrap.sh" ] } } } kafkacat-1.3.1/.gitignore000066400000000000000000000001401307612056000152440ustar00rootroot00000000000000\#* *~ *.o *.d kafkacat config.cache config.log* config.h Makefile.config tmp-bootstrap *.offsetkafkacat-1.3.1/.travis.yml000066400000000000000000000004771307612056000154020ustar00rootroot00000000000000language: c compiler: - gcc - clang os: - linux - osx script: ./bootstrap.sh before_install: - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get update -qq && sudo apt-get install -y libssl-dev libsasl2-dev ; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install openssl ; fikafkacat-1.3.1/LICENSE000066400000000000000000000025131307612056000142670ustar00rootroot00000000000000librdkafka - Apache Kafka C driver library Copyright (c) 2012, Magnus Edenhill All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. kafkacat-1.3.1/LICENSE.getdelim000066400000000000000000000045651307612056000160710ustar00rootroot00000000000000getdelim.c from newlib with Red Hat's copyright and the following license: (1) Red Hat Incorporated Copyright (c) 1994-2009 Red Hat, Inc. All rights reserved. This copyrighted material is made available to anyone wishing to use, modify, copy, or redistribute it subject to the terms and conditions of the BSD License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. A copy of this license is available at http://www.opensource.org/licenses. 
Any Red Hat trademarks that are incorporated in the source code or documentation are not subject to the BSD License and may only be used or replicated with the express permission of Red Hat, Inc. (2) University of California, Berkeley Copyright (c) 1981-2000 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. kafkacat-1.3.1/LICENSE.wingetopt000066400000000000000000000050461307612056000163120ustar00rootroot00000000000000For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt /* * Copyright (c) 2002 Todd C. Miller * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F39502-99-1-0512. */ /*- * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Dieter Baron and Thomas Klausner. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ kafkacat-1.3.1/Makefile000077500000000000000000000010041307612056000147170ustar00rootroot00000000000000include Makefile.config BIN= kafkacat SRCS_y= kafkacat.c format.c tools.c SRCS_$(ENABLE_JSON) += json.c OBJS= $(SRCS_y:.c=.o) .PHONY: all: $(BIN) include mklove/Makefile.base # librdkafka must be compiled with -gstrict-dwarf, but kafkacat must not, # due to some clang bug on OSX 10.9 CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS)) install: bin-install install-man install-man: echo $(INSTALL) -d $$DESTDIR$(man1dir) && \ echo $(INSTALL) kafkacat.1 $$DESTDIR$(man1dir) clean: bin-clean -include $(DEPS) kafkacat-1.3.1/README.md000066400000000000000000000077401307612056000145500ustar00rootroot00000000000000kafkacat ======== Copyright (c) 2014-2016 Magnus Edenhill [https://github.com/edenhill/kafkacat](https://github.com/edenhill/kafkacat) **kafkacat** is a generic non-JVM producer and consumer for Apache Kafka >=0.8, think of it as a netcat for Kafka. In **producer** mode kafkacat reads messages from stdin, delimited with a configurable delimeter (-D, defaults to newline), and produces them to the provided Kafka cluster (-b), topic (-t) and partition (-p). In **consumer** mode kafkacat reads messages from a topic and partition and prints them to stdout using the configured message delimiter. There's also support for the Kafka >=0.9 high-level balanced consumer, use the `-G ` switch and provide a list of topics to join the group. kafkacat also features a Metadata list (-L) mode to display the current state of the Kafka cluster and its topics and partitions. kafkacat is fast and lightweight; statically linked it is no more than 150Kb. # Install On recent enough Debian systems: ```` apt-get install kafkacat ```` And on Mac OS X with homebrew installed: ```` brew install kafkacat ```` Otherwise follow directions below. # Requirements * librdkafka - https://github.com/edenhill/librdkafka * libyajl (for JSON support, optional) On Ubuntu or Debian: `sudo apt-get install librdkafka-dev libyajl-dev` # Build ./configure make sudo make install # Quick build The bootstrap.sh build script will download and build the required dependencies, providing a quick and easy means of building kafkacat. Internet connectivity and wget/curl is required by this script. The resulting kafkacat binary will be linked statically to avoid runtime dependencies. **NOTE**: Requires `curl` and `cmake` (for yajl) to be installed. 
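On Debian/Ubuntu the prerequisites can typically be installed first with (package names assumed):

    sudo apt-get install curl cmake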
./bootstrap.sh # Examples High-level balanced KafkaConsumer: subscribe to topic1 and topic2 (requires broker >=0.9.0 and librdkafka version >=0.9.1) $ kafkacat -b mybroker -G mygroup topic1 topic2 Read messages from stdin, produce to 'syslog' topic with snappy compression $ tail -f /var/log/syslog | kafkacat -b mybroker -t syslog -z snappy Read messages from Kafka 'syslog' topic, print to stdout $ kafkacat -b mybroker -t syslog Produce messages from file (one file is one message) $ kafkacat -P -b mybroker -t filedrop -p 0 myfile1.bin /etc/motd thirdfile.tgz Read the last 2000 messages from 'syslog' topic, then exit $ kafkacat -C -b mybroker -t syslog -p 0 -o -2000 -e Consume from all partitions from 'syslog' topic $ kafkacat -C -b mybroker -t syslog Output consumed messages in JSON envelope: $ kafkacat -b mybroker -t syslog -J Output consumed messages according to format string: $ kafkacat -b mybroker -t syslog -f 'Topic %t[%p], offset: %o, key: %k, payload: %S bytes: %s\n' Read the last 100 messages from topic 'syslog' with librdkafka configuration parameter 'broker.version.fallback' set to '0.8.2.1' : $ kafkacat -C -b mybroker -X broker.version.fallback=0.8.2.1 -t syslog -p 0 -o -100 -e Metadata listing ```` $ kafkacat -L -b mybroker Metadata for all topics (from broker 1: mybroker:9092/1): 3 brokers: broker 1 at mybroker:9092 broker 2 at mybrokertoo:9092 broker 3 at thirdbroker:9092 16 topics: topic "syslog" with 3 partitions: partition 0, leader 3, replicas: 1,2,3, isrs: 1,2,3 partition 1, leader 1, replicas: 1,2,3, isrs: 1,2,3 partition 2, leader 1, replicas: 1,2, isrs: 1,2 topic "rdkafkatest1_auto_49f744a4327b1b1e" with 2 partitions: partition 0, leader 3, replicas: 3, isrs: 3 partition 1, leader 1, replicas: 1, isrs: 1 topic "rdkafkatest1_auto_e02f58f2c581cba" with 2 partitions: partition 0, leader 3, replicas: 3, isrs: 3 partition 1, leader 1, replicas: 1, isrs: 1 .... ```` JSON metadata listing $ kafkacat -b mybroker -L -J Pretty-printed JSON metadata listing $ kafkacat -b mybroker -L -J | jq . Query offset(s) by timestamp(s) $ kafkacat -b mybroker -Q -t mytopic:3:2389238523 mytopic2:0:18921841 kafkacat-1.3.1/bootstrap.sh000077500000000000000000000052511307612056000156400ustar00rootroot00000000000000#!/bin/bash # # This script provides a quick build alternative: # * Dependencies are downloaded and built automatically # * kafkacat is built automatically. # * kafkacat is linked statically to avoid runtime dependencies. # # While this might not be the preferred method of building kafkacat, it # is the easiest and quickest way. # set -o errexit -o nounset -o pipefail function github_download { repo=$1 version=$2 dir=$3 url=https://github.com/${repo}/archive/${version}.tar.gz if [[ -d $dir ]]; then echo "Directory $dir already exists, not downloading $url" return 0 fi echo "Downloading $url to $dir" if which wget 2>&1 > /dev/null; then DL='wget -q -O-' else DL='curl -s -L' fi mkdir -p "$dir" pushd "$dir" > /dev/null ($DL "$url" | tar -xzf - --strip-components 1) || exit 1 popd > /dev/null } function build { dir=$1 cmds=$2 echo "Building $dir" pushd $dir > /dev/null set +o errexit eval $cmds ret=$? set -o errexit popd > /dev/null if [[ $ret == 0 ]]; then echo "Build of $dir SUCCEEDED!" else echo "Build of $dir FAILED!" fi return $ret } function pkg_cfg_lib { pkg=$1 local libs=$(PKG_CONFIG_PATH=tmp-bootstrap/usr/local/lib/pkgconfig pkg-config --libs --static $pkg) # If pkg-config isnt working try grabbing the library list manually. 
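    # (the fallback below reads the Libs.private line straight out of
    # the installed .pc file).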
if [[ -z "$libs" ]]; then libs=$(grep ^Libs.private tmp-bootstrap/usr/local/lib/pkgconfig/${pkg}.pc | sed -e s'/^Libs.private: //g') fi # Since we specify the exact .a files to link further down below # we need to remove the -l here. libs=$(echo $libs | sed -e "s/-l${pkg}//g") echo " $libs" >&2 echo "Using $libs for $pkg" } mkdir -p tmp-bootstrap pushd tmp-bootstrap > /dev/null github_download "edenhill/librdkafka" "master" "librdkafka" github_download "lloyd/yajl" "master" "libyajl" build librdkafka "([ -f config.h ] || ./configure) && make && make DESTDIR=\"${PWD}/\" install" || (echo "Failed to build librdkafka: bootstrap failed" ; false) build libyajl "([ -f config.h ] || ./configure) && make && make DESTDIR=\"${PWD}/\" install" || (echo "Failed to build libyajl: JSON support will probably be disabled" ; true) popd > /dev/null echo "Building kafkacat" export CPPFLAGS="${CPPFLAGS:-} -Itmp-bootstrap/usr/local/include" export LIBS="$(pkg_cfg_lib rdkafka) $(pkg_cfg_lib yajl)" export STATIC_LIB_rdkafka="tmp-bootstrap/usr/local/lib/librdkafka.a" export STATIC_LIB_yajl="tmp-bootstrap/usr/local/lib/libyajl_s.a" ./configure --enable-static --enable-json make echo "" echo "Success! kafkacat is now built" echo "" ./kafkacat -h kafkacat-1.3.1/configure000077500000000000000000000110361307612056000151710ustar00rootroot00000000000000#!/usr/bin/env bash # BASHVER=$(expr ${BASH_VERSINFO[0]} \* 1000 + ${BASH_VERSINFO[1]}) if [ "$BASHVER" -lt 3002 ]; then echo "ERROR: mklove requires bash version 3.2 or later but you are using $BASH_VERSION ($BASHVER)" echo " See https://github.com/edenhill/mklove/issues/15" exit 1 fi MKL_CONFIGURE_ARGS="$0 $*" # Load base module source mklove/modules/configure.base # Read some special command line options right away that must be known prior to # sourcing modules. mkl_in_list "$*" "--no-download" && MKL_NO_DOWNLOAD=1 # Disable downloads when --help is used to avoid blocking calls. mkl_in_list "$*" "--help" && MKL_NO_DOWNLOAD=1 mkl_in_list "$*" "--debug" && MKL_DEBUG=1 # This is the earliest possible time to check for color support in # terminal because mkl_check_terminal_color_support uses mkl_dbg which # needs to know if MKL_DEBUG is set mkl_check_terminal_color_support # Delete temporary Makefile and header files on exit. trap "{ rm -f $MKL_OUTMK $MKL_OUTH; }" EXIT ## ## Load builtin modules ## # Builtin options, etc. mkl_require builtin # Host/target support mkl_require host # Compiler detection mkl_require cc # Load application provided modules (in current directory), if any. for fname in configure.* ; do if [[ $fname = 'configure.*' ]]; then continue fi # Skip temporary files if [[ $fname = *~ ]]; then continue fi mkl_require $fname done ## ## Argument parsing (options) ## ## _SAVE_ARGS="$*" # Parse arguments while [[ ! -z $@ ]]; do if [[ $1 != --* ]]; then mkl_err "Unknown non-option argument: $1" mkl_usage exit 1 fi opt=${1#--} shift if [[ $opt = *=* ]]; then name="${opt%=*}" arg="${opt#*=}" eqarg=1 else name="$opt" arg="" eqarg=0 fi safeopt="$(mkl_env_esc $name)" if ! mkl_func_exists opt_$safeopt ; then mkl_err "Unknown option $opt" mkl_usage exit 1 fi # Check if this option needs an argument. reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)") if [[ ! -z $reqarg ]]; then if [[ $eqarg == 0 && -z $arg ]]; then arg=$1 shift if [[ -z $arg ]]; then mkl_err "Missing argument to option --$name $reqarg" exit 1 fi fi else if [[ ! 
-z $arg ]]; then mkl_err "Option --$name expects no argument" exit 1 fi arg=y fi case $name in re|reconfigure) oldcmd=$(grep '^# configure exec: ' config.log | \ sed -e 's/^\# configure exec: [^ ]*configure\( \|$\)//') echo "Reconfiguring: $0 $oldcmd" exec $0 $oldcmd ;; list-modules) echo "Modules loaded:" for mod in $MKL_MODULES ; do echo " $mod" done exit 0 ;; list-checks) echo "Check functions in calling order:" for mf in $MKL_CHECKS ; do mod=${mf%:*} func=${mf#*:} echo -e "${MKL_GREEN}From module $mod:$MKL_CLR_RESET" declare -f $func echo "" done exit 0 ;; update-modules) fails=0 echo "Updating modules" for mod in $MKL_MODULES ; do echo -n "Updating $mod..." if mkl_module_download "$mod" > /dev/null ; then echo -e "${MKL_GREEN}ok${MKL_CLR_RESET}" else echo -e "${MKL_RED}failed${MKL_CLR_RESET}" fails=$(expr $fails + 1) fi done exit $fails ;; help) mkl_usage exit 0 ;; *) opt_$safeopt $arg || exit 1 mkl_var_append MKL_OPTS_SET "$safeopt" ;; esac done if [[ ! -z $MKL_CLEAN ]]; then mkl_clean exit 0 fi # Move away previous log file [[ -f $MKL_OUTDBG ]] && mv $MKL_OUTDBG ${MKL_OUTDBG}.old # Create output files echo "# configure exec: $0 $_SAVE_ARGS" >> $MKL_OUTDBG echo "# On $(date)" >> $MKL_OUTDBG rm -f $MKL_OUTMK $MKL_OUTH # Load cache file mkl_cache_read # Run checks mkl_checks_run # Check accumulated failures, will not return on failure. mkl_check_fails # Generate outputs mkl_generate # Summarize what happened mkl_summary # Write cache file mkl_cache_write echo "" echo "Now type 'make' to build" trap - EXIT exit 0 kafkacat-1.3.1/configure.kafkacat000066400000000000000000000032041307612056000167300ustar00rootroot00000000000000#!/bin/bash # mkl_require good_cflags mkl_require gitversion as KAFKACAT_VERSION default 1.3.1 function checks { # Check that librdkafka is available, and allow to link it statically. mkl_meta_set "rdkafka" "desc" "librdkafka is available at http://github.com/edenhill/librdkafka. To quickly download all dependencies and build kafkacat try ./bootstrap.sh" mkl_meta_set "rdkafka" "deb" "librdkafka-dev" mkl_lib_check --static=-lrdkafka "rdkafka" "" fail CC "-lrdkafka" \ "#include " # Make sure rdkafka is new enough. mkl_meta_set "librdkafkaver" "name" "librdkafka metadata API" mkl_meta_set "librdkafkaver" "desc" "librdkafka 0.8.4 or later is required for the Metadata API" mkl_compile_check "librdkafkaver" "" fail CC "" \ "#include struct rd_kafka_metadata foo;" # Enable KafkaConsumer support if librdkafka is new enough mkl_meta_set "librdkafka_ge_090" "name" "librdkafka KafkaConsumer support" mkl_compile_check "librdkafka_ge_090" ENABLE_KAFKACONSUMER disable CC "" " #include #if RD_KAFKA_VERSION >= 0x00090000 #else #error \"rdkafka version < 0.9.0\" #endif" mkl_meta_set "yajl" "deb" "libyajl-dev" # Check for JSON library (yajl) if [[ $WITH_JSON == y ]] && \ mkl_lib_check --static=-lyajl "yajl" HAVE_YAJL disable CC "-lyajl" \ "#include #if YAJL_MAJOR >= 2 #else #error \"Requires libyajl2\" #endif " then mkl_allvar_set "json" ENABLE_JSON y fi } mkl_toggle_option "kafkacat" WITH_JSON --enable-json "JSON support (requires libyajl2)" y kafkacat-1.3.1/format.c000066400000000000000000000233471307612056000147260ustar00rootroot00000000000000/* * kafkacat - Apache Kafka consumer and producer * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "kafkacat.h" #ifndef _MSC_VER #include /* for htonl() */ #endif static void fmt_add (fmt_type_t type, const char *str, int len) { if (conf.fmt_cnt == KC_FMT_MAX_SIZE) FATAL("Too many formatters & strings (KC_FMT_MAX_SIZE=%i)", KC_FMT_MAX_SIZE); conf.fmt[conf.fmt_cnt].type = type; /* For STR types */ if (len) { const char *s; char *d; conf.fmt[conf.fmt_cnt].str = d = malloc(len+1); memcpy(d, str, len); d[len] = '\0'; s = d; /* Convert \.. sequences */ while (*s) { if (*s == '\\' && *(s+1)) { int base = 0; const char *next; s++; switch (*s) { case 't': *d = '\t'; break; case 'n': *d = '\n'; break; case 'r': *d = '\r'; break; case 'x': s++; base = 16; /* FALLTHRU */ default: if (*s >= '0' && *s <= '9') { *d = (char)strtoul(s, (char **)&next, base); if (next > s) s = next - 1; } else { *d = *s; } break; } } else { *d = *s; } s++; d++; } *d = '\0'; conf.fmt[conf.fmt_cnt].str_len = strlen(conf.fmt[conf.fmt_cnt].str); } conf.fmt_cnt++; } /** * Parse a format string to create a formatter list. 
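 * For example, "%t [%p] at %o: %s\n" is parsed into the token list
 * KC_FMT_TOPIC, KC_FMT_STR, KC_FMT_PARTITION, KC_FMT_STR,
 * KC_FMT_OFFSET, KC_FMT_STR, KC_FMT_PAYLOAD, KC_FMT_STR.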
*/ void fmt_parse (const char *fmt) { const char *s = fmt, *t; while (*s) { if ((t = strchr(s, '%'))) { if (t > s) fmt_add(KC_FMT_STR, s, (int)(t-s)); s = t+1; switch (*s) { case 'o': fmt_add(KC_FMT_OFFSET, NULL, 0); break; case 'k': fmt_add(KC_FMT_KEY, NULL, 0); break; case 'K': fmt_add(KC_FMT_KEY_LEN, NULL, 0); break; case 's': fmt_add(KC_FMT_PAYLOAD, NULL, 0); break; case 'S': fmt_add(KC_FMT_PAYLOAD_LEN, NULL, 0); break; case 'R': fmt_add(KC_FMT_PAYLOAD_LEN_BINARY, NULL, 0); break; case 't': fmt_add(KC_FMT_TOPIC, NULL, 0); break; case 'p': fmt_add(KC_FMT_PARTITION, NULL, 0); break; case 'T': fmt_add(KC_FMT_TIMESTAMP, NULL, 0); conf.flags |= CONF_F_APIVERREQ; break; case '%': fmt_add(KC_FMT_STR, s, 1); break; case '\0': FATAL("Empty formatter"); break; default: FATAL("Unsupported formatter: %%%c", *s); break; } s++; } else { fmt_add(KC_FMT_STR, s, strlen(s)); break; } } } void fmt_init (void) { #ifdef ENABLE_JSON if (conf.flags & CONF_F_FMT_JSON) fmt_init_json(); #endif } void fmt_term (void) { #ifdef ENABLE_JSON if (conf.flags & CONF_F_FMT_JSON) fmt_term_json(); #endif } /** * Delimited output */ static void fmt_msg_output_str (FILE *fp, const rd_kafka_message_t *rkmessage) { int i; for (i = 0 ; i < conf.fmt_cnt ; i++) { int r = 1; uint32_t belen; switch (conf.fmt[i].type) { case KC_FMT_OFFSET: r = fprintf(fp, "%"PRId64, rkmessage->offset); break; case KC_FMT_KEY: if (rkmessage->key_len) r = fwrite(rkmessage->key, rkmessage->key_len, 1, fp); else if (conf.flags & CONF_F_NULL) r = fwrite(conf.null_str, conf.null_str_len, 1, fp); break; case KC_FMT_KEY_LEN: r = fprintf(fp, "%zd", /* Use -1 to indicate NULL keys */ rkmessage->key ? (ssize_t)rkmessage->key_len : -1); break; case KC_FMT_PAYLOAD: if (rkmessage->len) r = fwrite(rkmessage->payload, rkmessage->len, 1, fp); else if (conf.flags & CONF_F_NULL) r = fwrite(conf.null_str, conf.null_str_len, 1, fp); break; case KC_FMT_PAYLOAD_LEN: r = fprintf(fp, "%zd", /* Use -1 to indicate NULL messages */ rkmessage->payload ? (ssize_t)rkmessage->len : -1); break; case KC_FMT_PAYLOAD_LEN_BINARY: /* Use -1 to indicate NULL messages */ belen = htonl((uint32_t)(rkmessage->payload ? (ssize_t)rkmessage->len : -1)); r = fwrite(&belen, sizeof(uint32_t), 1, fp); break; case KC_FMT_STR: r = fwrite(conf.fmt[i].str, conf.fmt[i].str_len, 1, fp); break; case KC_FMT_TOPIC: r = fprintf(fp, "%s", rd_kafka_topic_name(rkmessage->rkt)); break; case KC_FMT_PARTITION: r = fprintf(fp, "%"PRId32, rkmessage->partition); break; #if RD_KAFKA_VERSION >= 0x000902ff case KC_FMT_TIMESTAMP: { rd_kafka_timestamp_type_t tstype; r = fprintf(fp, "%"PRId64, rd_kafka_message_timestamp(rkmessage, &tstype)); #else r = fprintf(fp, "-1"); #endif break; } } if (r < 1) FATAL("Write error for message " "of %zd bytes at offset %"PRId64"): %s", rkmessage->len, rkmessage->offset, strerror(errno)); } } /** * Format and output a received message. */ void fmt_msg_output (FILE *fp, const rd_kafka_message_t *rkmessage) { #ifdef ENABLE_JSON if (conf.flags & CONF_F_FMT_JSON) fmt_msg_output_json(fp, rkmessage); else #endif fmt_msg_output_str(fp, rkmessage); } kafkacat-1.3.1/json.c000066400000000000000000000217441307612056000144060ustar00rootroot00000000000000/* * kafkacat - Apache Kafka consumer and producer * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "kafkacat.h" #include #define JS_STR(G, STR) do { \ const char *_s = (STR); \ yajl_gen_string(G, (const unsigned char *)_s, strlen(_s)); \ } while (0) void fmt_msg_output_json (FILE *fp, const rd_kafka_message_t *rkmessage) { yajl_gen g; const char *topic = rd_kafka_topic_name(rkmessage->rkt); const unsigned char *buf; size_t len; g = yajl_gen_alloc(NULL); yajl_gen_map_open(g); JS_STR(g, "topic"); JS_STR(g, topic); JS_STR(g, "partition"); yajl_gen_integer(g, (int)rkmessage->partition); JS_STR(g, "offset"); yajl_gen_integer(g, (long long int)rkmessage->offset); JS_STR(g, "key"); yajl_gen_string(g, (const unsigned char *)rkmessage->key, rkmessage->key_len); JS_STR(g, "payload"); yajl_gen_string(g, (const unsigned char *)rkmessage->payload, rkmessage->len); yajl_gen_map_close(g); yajl_gen_get_buf(g, &buf, &len); if (fwrite(buf, len, 1, fp) != 1 || (conf.fmt[0].str_len > 0 && fwrite(conf.fmt[0].str, conf.fmt[0].str_len, 1, fp) != 1)) FATAL("Output write error: %s", strerror(errno)); yajl_gen_free(g); } /** * Print metadata information */ void metadata_print_json (const struct rd_kafka_metadata *metadata) { yajl_gen g; int i, j, k; const unsigned char *buf; size_t len; g = yajl_gen_alloc(NULL); yajl_gen_map_open(g); JS_STR(g, "originating_broker"); yajl_gen_map_open(g); JS_STR(g, "id"); yajl_gen_integer(g, (long long int)metadata->orig_broker_id); JS_STR(g, "name"); JS_STR(g, metadata->orig_broker_name); yajl_gen_map_close(g); JS_STR(g, "query"); yajl_gen_map_open(g); JS_STR(g, "topic"); JS_STR(g, conf.topic ? 
: "*"); yajl_gen_map_close(g); /* Iterate brokers */ JS_STR(g, "brokers"); yajl_gen_array_open(g); for (i = 0 ; i < metadata->broker_cnt ; i++) { int blen = strlen(metadata->brokers[i].host); char *host = alloca(blen+1+5+1); sprintf(host, "%s:%i", metadata->brokers[i].host, metadata->brokers[i].port); yajl_gen_map_open(g); JS_STR(g, "id"); yajl_gen_integer(g, (long long int)metadata->brokers[i].id); JS_STR(g, "name"); JS_STR(g, host); yajl_gen_map_close(g); } yajl_gen_array_close(g); /* Iterate topics */ JS_STR(g, "topics"); yajl_gen_array_open(g); for (i = 0 ; i < metadata->topic_cnt ; i++) { const struct rd_kafka_metadata_topic *t = &metadata->topics[i]; yajl_gen_map_open(g); JS_STR(g, "topic"); JS_STR(g, t->topic); if (t->err) { JS_STR(g, "error"); JS_STR(g, rd_kafka_err2str(t->err)); } JS_STR(g, "partitions"); yajl_gen_array_open(g); /* Iterate topic's partitions */ for (j = 0 ; j < t->partition_cnt ; j++) { const struct rd_kafka_metadata_partition *p; p = &t->partitions[j]; yajl_gen_map_open(g); JS_STR(g, "partition"); yajl_gen_integer(g, (long long int)p->id); if (p->err) { JS_STR(g, "error"); JS_STR(g, rd_kafka_err2str(p->err)); } JS_STR(g, "leader"); yajl_gen_integer(g, (long long int)p->leader); /* Iterate partition's replicas */ JS_STR(g, "replicas"); yajl_gen_array_open(g); for (k = 0 ; k < p->replica_cnt ; k++) { yajl_gen_map_open(g); JS_STR(g, "id"); yajl_gen_integer(g, (long long int)p->replicas[k]); yajl_gen_map_close(g); } yajl_gen_array_close(g); /* Iterate partition's ISRs */ JS_STR(g, "isrs"); yajl_gen_array_open(g); for (k = 0 ; k < p->isr_cnt ; k++) { yajl_gen_map_open(g); JS_STR(g, "id"); yajl_gen_integer(g, (long long int)p->isrs[k]); yajl_gen_map_close(g); } yajl_gen_array_close(g); yajl_gen_map_close(g); } yajl_gen_array_close(g); yajl_gen_map_close(g); } yajl_gen_array_close(g); yajl_gen_map_close(g); yajl_gen_get_buf(g, &buf, &len); if (fwrite(buf, len, 1, stdout) != 1) FATAL("Output write error: %s", strerror(errno)); yajl_gen_free(g); } /** * @brief Generate (if json_gen is a valid yajl_gen), or print (if json_gen is NULL) * a map of topic+partitions+offsets[+errors] * * { "": { "topic": "", * "": { "partition": , "offset": , * ["error": "..."]}, * .. }, * .. 
} */ void partition_list_print_json (const rd_kafka_topic_partition_list_t *parts, void *json_gen) { yajl_gen g = (yajl_gen)json_gen; int i; const char *last_topic = ""; if (!g) g = yajl_gen_alloc(NULL); yajl_gen_map_open(g); for (i = 0 ; i < parts->cnt ; i++) { const rd_kafka_topic_partition_t *p = &parts->elems[i]; char partstr[16]; if (strcmp(last_topic, p->topic)) { if (*last_topic) yajl_gen_map_close(g); /* topic */ JS_STR(g, p->topic); yajl_gen_map_open(g); /* topic */ JS_STR(g, "topic"); JS_STR(g, p->topic); last_topic = p->topic; } snprintf(partstr, sizeof(partstr), "%"PRId32, p->partition); JS_STR(g, partstr); yajl_gen_map_open(g); JS_STR(g, "partition"); yajl_gen_integer(g, p->partition); JS_STR(g, "offset"); yajl_gen_integer(g, p->offset); if (p->err) { JS_STR(g, "error"); JS_STR(g, rd_kafka_err2str(p->err)); } yajl_gen_map_close(g); } if (*last_topic) yajl_gen_map_close(g); /* topic */ yajl_gen_map_close(g); if (!json_gen) { const unsigned char *buf; size_t len; yajl_gen_get_buf(g, &buf, &len); (void)fwrite(buf, len, 1, stdout); yajl_gen_free(g); } } void fmt_init_json (void) { } void fmt_term_json (void) { } kafkacat-1.3.1/kafkacat.1000066400000000000000000000026311307612056000151120ustar00rootroot00000000000000.Dd $Mdocdate: December 09 2014 $ .Dt KAFKACAT 1 .Os .Sh NAME .Nm kafkacat .Nd generic producer and consumer for Apache Kafka .Sh SYNOPSIS .Nm .Fl C | P | L .Fl t Ar topic .Op Fl p Ar partition .Fl b Ar brokers Op , Ar ... .Op Fl D Ar delim .Op Fl K Ar delim .Op Fl c Ar cnt .Op Fl X Ar list .Op Fl X Ar prop=val .Op Fl X Ar dump .Op Fl d Ar dbg Op , Ar ... .Op Fl q .Op Fl v .Op Fl Z .Op specific options .Nm .Fl C .Op generic options .Op Fl o Ar offset .Op Fl e .Op Fl O .Op Fl u .Op Fl J .Op Fl f Ar fmtstr .Nm .Fl P .Op generic options .Op Fl z Ar snappy | gzip .Op Fl p Li -1 .Op Ar file Op ... .Nm .Fl L .Op generic options .Op Fl t Ar topic .Sh DESCRIPTION .Nm is a generic non-JVM producer and consumer for Apache Kafka 0.8, think of it as a netcat for Kafka. .Pp In producer mode ( .Fl P ), .Nm reads messages from stdin, delimited with a configurable delimeter and produces them to the provided Kafka cluster, topic and partition. In consumer mode ( .Fl C ), .Nm reads messages from a topic and partition and prints them to stdout using the configured message delimiter. .Pp If neither .Fl P or .Fl C are specified .Nm attempts to figure out the mode automatically based on stdin/stdout tty types. .Pp .Nm also features a metadata list mode ( .Fl L ), to display the current state of the Kafka cluster and its topics and partitions. .Sh SEE ALSO For a more extensive help and some simple examples, run .Nm with .Fl h flag. kafkacat-1.3.1/kafkacat.c000066400000000000000000001520411307612056000151750ustar00rootroot00000000000000/* * kafkacat - Apache Kafka consumer and producer * * Copyright (c) 2014, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _MSC_VER #include #include #include #include #else #pragma comment(lib, "ws2_32.lib") #include "win32/wingetopt.h" #include #endif #include #include #include #include #include #include #include #include "kafkacat.h" struct conf conf = { .run = 1, .verbosity = 1, .exitonerror = 1, .partition = RD_KAFKA_PARTITION_UA, .msg_size = 1024*1024, .null_str = "NULL", }; static struct stats { uint64_t tx; uint64_t tx_err_q; uint64_t tx_err_dr; uint64_t tx_delivered; uint64_t rx; } stats; /* Partition's at EOF state array */ int *part_eof = NULL; /* Number of partitions that has reached EOF */ int part_eof_cnt = 0; /* Threshold level (partitions at EOF) before exiting */ int part_eof_thres = 0; /** * Fatal error: print error and exit */ void RD_NORETURN fatal0 (const char *func, int line, const char *fmt, ...) { va_list ap; char buf[1024]; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); INFO(2, "Fatal error at %s:%i:\n", func, line); fprintf(stderr, "%% ERROR: %s\n", buf); exit(1); } /** * Print error and exit if needed */ void error0 (int exitonerror, const char *func, int line, const char *fmt, ...) { va_list ap; char buf[1024]; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if (exitonerror) INFO(2, "Error at %s:%i:\n", func, line); fprintf(stderr, "%% ERROR: %s%s\n", buf, exitonerror ? " : terminating":""); if (exitonerror) exit(1); } /** * The delivery report callback is called once per message to * report delivery success or failure. */ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { static int say_once = 1; if (rkmessage->err) { INFO(1, "Delivery failed for message: %s\n", rd_kafka_err2str(rkmessage->err)); stats.tx_err_dr++; return; } INFO(3, "Message delivered to partition %"PRId32" (offset %"PRId64")\n", rkmessage->partition, rkmessage->offset); if (rkmessage->offset == 0 && say_once) { INFO(3, "Enable message offset reporting " "with '-X topic.produce.offset.report=true'\n"); say_once = 0; } stats.tx_delivered++; } /** * Produces a single message, retries on queue congestion, and * exits hard on error. */ static void produce (void *buf, size_t len, const void *key, size_t key_len, int msgflags) { /* Produce message: keep trying until it succeeds. 
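 * A RD_KAFKA_RESP_ERR__QUEUE_FULL error is not treated as fatal:
 * rd_kafka_poll() is called to serve delivery reports (freeing up
 * queue space) and the produce is retried.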
*/ do { rd_kafka_resp_err_t err; if (!conf.run) FATAL("Program terminated while " "producing message of %zd bytes", len); if (rd_kafka_produce(conf.rkt, conf.partition, msgflags, buf, len, key, key_len, NULL) != -1) { stats.tx++; break; } err = rd_kafka_errno2err(errno); if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) FATAL("Failed to produce message (%zd bytes): %s", len, rd_kafka_err2str(err)); stats.tx_err_q++; /* Internal queue full, sleep to allow * messages to be produced/time out * before trying again. */ rd_kafka_poll(conf.rk, 5); } while (1); /* Poll for delivery reports, errors, etc. */ rd_kafka_poll(conf.rk, 0); } /** * Produce contents of file as a single message. * Returns the file length on success, else -1. */ static ssize_t produce_file (const char *path) { int fd; void *ptr; struct stat st; ssize_t sz; int msgflags = 0; if ((fd = _COMPAT(open)(path, O_RDONLY)) == -1) { INFO(1, "Failed to open %s: %s\n", path, strerror(errno)); return -1; } if (fstat(fd, &st) == -1) { INFO(1, "Failed to stat %s: %s\n", path, strerror(errno)); _COMPAT(close)(fd); return -1; } if (st.st_size == 0) { INFO(3, "Skipping empty file %s\n", path); _COMPAT(close)(fd); return 0; } #ifndef _MSC_VER ptr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); if (ptr == MAP_FAILED) { INFO(1, "Failed to mmap %s: %s\n", path, strerror(errno)); _COMPAT(close)(fd); return -1; } sz = st.st_size; msgflags = RD_KAFKA_MSG_F_COPY; #else ptr = malloc(st.st_size); if (!ptr) { INFO(1, "Failed to allocate message for %s: %s\n", path, strerror(errno)); _COMPAT(close)(fd); return -1; } sz = _read(fd, ptr, st.st_size); if (sz < st.st_size) { INFO(1, "Read failed for %s (%zd/%zd): %s\n", path, sz, (size_t)st.st_size, sz == -1 ? strerror(errno) : "incomplete read"); free(ptr); close(fd); return -1; } msgflags = RD_KAFKA_MSG_F_FREE; #endif INFO(4, "Producing file %s (%"PRIdMAX" bytes)\n", path, (intmax_t)st.st_size); produce(ptr, sz, NULL, 0, msgflags); _COMPAT(close)(fd); if (!(msgflags & RD_KAFKA_MSG_F_FREE)) { #ifndef _MSC_VER munmap(ptr, st.st_size); #else free(ptr); #endif } return sz; } /** * Run producer, reading messages from 'fp' and producing to kafka. * Or if 'pathcnt' is > 0, read messages from files in 'paths' instead. */ static void producer_run (FILE *fp, char **paths, int pathcnt) { char *sbuf = NULL; size_t size = 0; ssize_t len; char errstr[512]; /* Assign per-message delivery report callback. */ rd_kafka_conf_set_dr_msg_cb(conf.rk_conf, dr_msg_cb); /* Create producer */ if (!(conf.rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf.rk_conf, errstr, sizeof(errstr)))) FATAL("Failed to create producer: %s", errstr); if (!conf.debug && conf.verbosity == 0) rd_kafka_set_log_level(conf.rk, 0); /* Create topic */ if (!(conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic, conf.rkt_conf))) FATAL("Failed to create topic %s: %s", conf.topic, rd_kafka_err2str(rd_kafka_errno2err(errno))); conf.rk_conf = NULL; conf.rkt_conf = NULL; if (pathcnt > 0 && !(conf.flags & CONF_F_LINE)) { int i; int good = 0; /* Read messages from files, each file is its own message. 
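 * A file that cannot be read is skipped with a warning; the exit
 * code is only set if none of the files could be produced.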
*/ for (i = 0 ; i < pathcnt ; i++) if (produce_file(paths[i]) != -1) good++; if (!good) conf.exitcode = 1; else if (good < pathcnt) INFO(1, "Failed to produce from %i/%i files\n", pathcnt - good, pathcnt); } else { /* Read messages from input, delimited by conf.delim */ while (conf.run && (len = getdelim(&sbuf, &size, conf.delim, fp)) != -1) { int msgflags = 0; char *buf = sbuf; char *key = NULL; size_t key_len = 0; size_t orig_len = len; if (len == 0) continue; /* Shave off delimiter */ if ((int)buf[len-1] == conf.delim) len--; if (len == 0) continue; /* Extract key, if desired and found. */ if (conf.flags & CONF_F_KEY_DELIM) { char *t; if ((t = memchr(buf, conf.key_delim, len))) { key_len = (size_t)(t-sbuf); key = buf; buf += key_len+1; len -= key_len+1; /* Since buf has been forwarded * from its initial allocation point * we must make sure we dont tell * librdkafka to free it (since the * address would be wrong). */ msgflags |= RD_KAFKA_MSG_F_COPY; if (conf.flags & CONF_F_NULL) { if (len == 0) buf = NULL; if (key_len == 0) key = NULL; } } } if (!(msgflags & RD_KAFKA_MSG_F_COPY) && len > 1024 && !(conf.flags & CONF_F_TEE)) { /* If message is larger than this arbitrary * threshold it will be more effective to * not copy the data but let rdkafka own it * instead. * * Note that CONF_T_TEE must be checked, * otherwise a possible race might occur. * */ msgflags |= RD_KAFKA_MSG_F_FREE; } else { /* For smaller messages a copy is * more efficient. */ msgflags |= RD_KAFKA_MSG_F_COPY; } /* Produce message */ produce(buf, len, key, key_len, msgflags); if (conf.flags & CONF_F_TEE && fwrite(sbuf, orig_len, 1, stdout) != 1) FATAL("Tee write error for message of %zd bytes: %s", orig_len, strerror(errno)); if (msgflags & RD_KAFKA_MSG_F_FREE) { /* rdkafka owns the allocated buffer * memory now. */ sbuf = NULL; size = 0; } /* Enforce -c */ if (stats.tx == (uint64_t)conf.msg_cnt) conf.run = 0; } if (conf.run) { if (!feof(fp)) FATAL("Unable to read message: %s", strerror(errno)); } } /* Wait for all messages to be transmitted */ conf.run = 1; while (conf.run && rd_kafka_outq_len(conf.rk)) rd_kafka_poll(conf.rk, 50); rd_kafka_topic_destroy(conf.rkt); rd_kafka_destroy(conf.rk); if (sbuf) free(sbuf); if (stats.tx_err_dr) conf.exitcode = 1; } static void handle_partition_eof (rd_kafka_message_t *rkmessage) { if (conf.mode == 'C') { /* Store EOF offset. * If partition is empty and at offset 0, * store future first message (0). */ rd_kafka_offset_store(rkmessage->rkt, rkmessage->partition, rkmessage->offset == 0 ? 0 : rkmessage->offset-1); if (conf.exit_eof) { if (!part_eof[rkmessage->partition]) { /* Stop consuming this partition */ rd_kafka_consume_stop(rkmessage->rkt, rkmessage->partition); part_eof[rkmessage->partition] = 1; part_eof_cnt++; if (part_eof_cnt >= part_eof_thres) conf.run = 0; } } } else if (conf.mode == 'G') { /* FIXME: Not currently handled */ } INFO(1, "Reached end of topic %s [%"PRId32"] " "at offset %"PRId64"%s\n", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, !conf.run ? ": exiting" : ""); } /** * Consume callback, called for each message consumed. 
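 * Handles partition EOF events, formats the message to the output
 * stream, stores the consumed offset (in -C mode) and clears conf.run
 * once the -c message count limit is reached.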
*/ static void consume_cb (rd_kafka_message_t *rkmessage, void *opaque) { FILE *fp = opaque; if (!conf.run) return; if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { handle_partition_eof(rkmessage); return; } FATAL("Topic %s [%"PRId32"] error: %s", rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rd_kafka_message_errstr(rkmessage)); } /* Print message */ fmt_msg_output(fp, rkmessage); if (conf.mode == 'C') { rd_kafka_offset_store(rkmessage->rkt, rkmessage->partition, rkmessage->offset); } if (++stats.rx == (uint64_t)conf.msg_cnt) conf.run = 0; } #if RD_KAFKA_VERSION >= 0x00090000 static void throttle_cb (rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque){ INFO(1, "Broker %s (%"PRId32") throttled request for %dms\n", broker_name, broker_id, throttle_time_ms); } #endif #if ENABLE_KAFKACONSUMER static void print_partition_list (int is_assigned, const rd_kafka_topic_partition_list_t *partitions) { int i; for (i = 0 ; i < partitions->cnt ; i++) { fprintf(stderr, "%s%s [%"PRId32"]", i > 0 ? ", ":"", partitions->elems[i].topic, partitions->elems[i].partition); } fprintf(stderr, "\n"); } static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque) { INFO(1, "Group %s rebalanced (memberid %s): ", conf.group, rd_kafka_memberid(rk)); switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: if (conf.verbosity >= 1) { fprintf(stderr, "assigned: "); print_partition_list(1, partitions); } rd_kafka_assign(rk, partitions); break; case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: if (conf.verbosity >= 1) { fprintf(stderr, "revoked: "); print_partition_list(1, partitions); } rd_kafka_assign(rk, NULL); break; default: INFO(0, "failed: %s\n", rd_kafka_err2str(err)); break; } } /** * Run high-level KafkaConsumer, write messages to 'fp' */ static void kafkaconsumer_run (FILE *fp, char *const *topics, int topic_cnt) { char errstr[512]; rd_kafka_resp_err_t err; rd_kafka_topic_partition_list_t *topiclist; int i; rd_kafka_conf_set_rebalance_cb(conf.rk_conf, rebalance_cb); rd_kafka_conf_set_default_topic_conf(conf.rk_conf, conf.rkt_conf); conf.rkt_conf = NULL; /* Create consumer */ if (!(conf.rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf.rk_conf, errstr, sizeof(errstr)))) FATAL("Failed to create consumer: %s", errstr); conf.rk_conf = NULL; /* Forward main event queue to consumer queue so we can * serve both queues with a single consumer_poll() call. */ rd_kafka_poll_set_consumer(conf.rk); if (conf.debug) rd_kafka_set_log_level(conf.rk, LOG_DEBUG); else if (conf.verbosity == 0) rd_kafka_set_log_level(conf.rk, 0); /* Build subscription set */ topiclist = rd_kafka_topic_partition_list_new(topic_cnt); for (i = 0 ; i < topic_cnt ; i++) rd_kafka_topic_partition_list_add(topiclist, topics[i], -1); /* Subscribe */ if ((err = rd_kafka_subscribe(conf.rk, topiclist))) FATAL("Failed to subscribe to %d topics: %s\n", topiclist->cnt, rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(topiclist); /* Read messages from Kafka, write to 'fp'. */ while (conf.run) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(conf.rk, 100); if (!rkmessage) continue; consume_cb(rkmessage, fp); rd_kafka_message_destroy(rkmessage); } if ((err = rd_kafka_consumer_close(conf.rk))) FATAL("Failed to close consumer: %s\n", rd_kafka_err2str(err)); /* Wait for outstanding requests to finish. 
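 * conf.run is reset first since a signal may already have cleared
 * it; the loop below only drains the remaining queued events.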
*/ conf.run = 1; while (conf.run && rd_kafka_outq_len(conf.rk) > 0) rd_kafka_poll(conf.rk, 50); rd_kafka_destroy(conf.rk); } #endif /** * Run consumer, consuming messages from Kafka and writing to 'fp'. */ static void consumer_run (FILE *fp) { char errstr[512]; rd_kafka_resp_err_t err; const rd_kafka_metadata_t *metadata; int i; rd_kafka_queue_t *rkqu; /* Create consumer */ if (!(conf.rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf.rk_conf, errstr, sizeof(errstr)))) FATAL("Failed to create producer: %s", errstr); if (!conf.debug && conf.verbosity == 0) rd_kafka_set_log_level(conf.rk, 0); /* The callback-based consumer API's offset store granularity is * not good enough for us, disable automatic offset store * and do it explicitly per-message in the consume callback instead. */ if (rd_kafka_topic_conf_set(conf.rkt_conf, "auto.commit.enable", "false", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) FATAL("%s", errstr); /* Create topic */ if (!(conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic, conf.rkt_conf))) FATAL("Failed to create topic %s: %s", conf.topic, rd_kafka_err2str(rd_kafka_errno2err(errno))); conf.rk_conf = NULL; conf.rkt_conf = NULL; /* Query broker for topic + partition information. */ if ((err = rd_kafka_metadata(conf.rk, 0, conf.rkt, &metadata, 5000))) FATAL("Failed to query metadata for topic %s: %s", rd_kafka_topic_name(conf.rkt), rd_kafka_err2str(err)); /* Error handling */ if (metadata->topic_cnt == 0) FATAL("No such topic in cluster: %s", rd_kafka_topic_name(conf.rkt)); if ((err = metadata->topics[0].err)) FATAL("Topic %s error: %s", rd_kafka_topic_name(conf.rkt), rd_kafka_err2str(err)); if (metadata->topics[0].partition_cnt == 0) FATAL("Topic %s has no partitions", rd_kafka_topic_name(conf.rkt)); /* If Exit-at-EOF is enabled, set up array to track EOF * state for each partition. */ if (conf.exit_eof) { part_eof = calloc(sizeof(*part_eof), metadata->topics[0].partition_cnt); if (conf.partition != RD_KAFKA_PARTITION_UA) part_eof_thres = 1; else part_eof_thres = metadata->topics[0].partition_cnt; } /* Create a shared queue that combines messages from * all wanted partitions. */ rkqu = rd_kafka_queue_new(conf.rk); /* Start consuming from all wanted partitions. */ for (i = 0 ; i < metadata->topics[0].partition_cnt ; i++) { int32_t partition = metadata->topics[0].partitions[i].id; /* If -p was specified: skip unwanted partitions */ if (conf.partition != RD_KAFKA_PARTITION_UA && conf.partition != partition) continue; /* Start consumer for this partition */ if (rd_kafka_consume_start_queue(conf.rkt, partition, conf.offset, rkqu) == -1) FATAL("Failed to start consuming " "topic %s [%"PRId32"]: %s", conf.topic, partition, rd_kafka_err2str(rd_kafka_errno2err(errno))); if (conf.partition != RD_KAFKA_PARTITION_UA) break; } if (conf.partition != RD_KAFKA_PARTITION_UA && i == metadata->topics[0].partition_cnt) FATAL("Topic %s (with partitions 0..%i): " "partition %i does not exist", rd_kafka_topic_name(conf.rkt), metadata->topics[0].partition_cnt-1, conf.partition); /* Read messages from Kafka, write to 'fp'. 
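 * consume_cb() is invoked for each message on the shared queue,
 * with 'fp' passed through as the opaque argument.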
*/ while (conf.run) { rd_kafka_consume_callback_queue(rkqu, 100, consume_cb, fp); /* Poll for errors, etc */ rd_kafka_poll(conf.rk, 0); } /* Stop consuming */ for (i = 0 ; i < metadata->topics[0].partition_cnt ; i++) { int32_t partition = metadata->topics[0].partitions[i].id; /* If -p was specified: skip unwanted partitions */ if (conf.partition != RD_KAFKA_PARTITION_UA && conf.partition != partition) continue; /* Dont stop already stopped partitions */ if (!part_eof || !part_eof[partition]) rd_kafka_consume_stop(conf.rkt, partition); rd_kafka_consume_stop(conf.rkt, partition); } /* Destroy shared queue */ rd_kafka_queue_destroy(rkqu); /* Wait for outstanding requests to finish. */ conf.run = 1; while (conf.run && rd_kafka_outq_len(conf.rk) > 0) rd_kafka_poll(conf.rk, 50); rd_kafka_metadata_destroy(metadata); rd_kafka_topic_destroy(conf.rkt); rd_kafka_destroy(conf.rk); } /** * Print metadata information */ static void metadata_print (const rd_kafka_metadata_t *metadata) { int i, j, k; printf("Metadata for %s (from broker %"PRId32": %s):\n", conf.topic ? conf.topic : "all topics", metadata->orig_broker_id, metadata->orig_broker_name); /* Iterate brokers */ printf(" %i brokers:\n", metadata->broker_cnt); for (i = 0 ; i < metadata->broker_cnt ; i++) printf(" broker %"PRId32" at %s:%i\n", metadata->brokers[i].id, metadata->brokers[i].host, metadata->brokers[i].port); /* Iterate topics */ printf(" %i topics:\n", metadata->topic_cnt); for (i = 0 ; i < metadata->topic_cnt ; i++) { const rd_kafka_metadata_topic_t *t = &metadata->topics[i]; printf(" topic \"%s\" with %i partitions:", t->topic, t->partition_cnt); if (t->err) { printf(" %s", rd_kafka_err2str(t->err)); if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) printf(" (try again)"); } printf("\n"); /* Iterate topic's partitions */ for (j = 0 ; j < t->partition_cnt ; j++) { const rd_kafka_metadata_partition_t *p; p = &t->partitions[j]; printf(" partition %"PRId32", " "leader %"PRId32", replicas: ", p->id, p->leader); /* Iterate partition's replicas */ for (k = 0 ; k < p->replica_cnt ; k++) printf("%s%"PRId32, k > 0 ? ",":"", p->replicas[k]); /* Iterate partition's ISRs */ printf(", isrs: "); for (k = 0 ; k < p->isr_cnt ; k++) printf("%s%"PRId32, k > 0 ? ",":"", p->isrs[k]); if (p->err) printf(", %s\n", rd_kafka_err2str(p->err)); else printf("\n"); } } } /** * Lists metadata */ static void metadata_list (void) { char errstr[512]; rd_kafka_resp_err_t err; const rd_kafka_metadata_t *metadata; /* Create handle */ if (!(conf.rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf.rk_conf, errstr, sizeof(errstr)))) FATAL("Failed to create producer: %s", errstr); if (!conf.debug && conf.verbosity == 0) rd_kafka_set_log_level(conf.rk, 0); /* Create topic, if specified */ if (conf.topic && !(conf.rkt = rd_kafka_topic_new(conf.rk, conf.topic, conf.rkt_conf))) FATAL("Failed to create topic %s: %s", conf.topic, rd_kafka_err2str(rd_kafka_errno2err(errno))); conf.rk_conf = NULL; conf.rkt_conf = NULL; /* Fetch metadata */ err = rd_kafka_metadata(conf.rk, conf.rkt ? 0 : 1, conf.rkt, &metadata, 5000); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) FATAL("Failed to acquire metadata: %s", rd_kafka_err2str(err)); /* Print metadata */ #if ENABLE_JSON if (conf.flags & CONF_F_FMT_JSON) metadata_print_json(metadata); else #endif metadata_print(metadata); rd_kafka_metadata_destroy(metadata); if (conf.rkt) rd_kafka_topic_destroy(conf.rkt); rd_kafka_destroy(conf.rk); } /** * Print usage and exit. 
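 * With version_only set, only the version banner is printed before
 * exiting with the given exit code.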
 */
static void RD_NORETURN usage (const char *argv0, int exitcode,
                               const char *reason,
                               int version_only) {
        FILE *out = stdout;
        char features[256];
        size_t flen;
        rd_kafka_conf_t *tmpconf;

        if (reason) {
                out = stderr;
                fprintf(out, "Error: %s\n\n", reason);
        }

        if (!version_only)
                fprintf(out, "Usage: %s <options> "
                        "[file1 file2 .. | topic1 topic2 ..]\n", argv0);

        /* Create a temporary config object to extract builtin.features */
        tmpconf = rd_kafka_conf_new();
        flen = sizeof(features);
        if (rd_kafka_conf_get(tmpconf, "builtin.features",
                              features, &flen) != RD_KAFKA_CONF_OK)
                strncpy(features, "n/a", sizeof(features));
        rd_kafka_conf_destroy(tmpconf);

        fprintf(out,
                "kafkacat - Apache Kafka producer and consumer tool\n"
                "https://github.com/edenhill/kafkacat\n"
                "Copyright (c) 2014-2015, Magnus Edenhill\n"
                "Version %s%s (librdkafka %s builtin.features=%s)\n"
                "\n",
                KAFKACAT_VERSION,
#if ENABLE_JSON
                " (JSON)",
#else
                "",
#endif
                rd_kafka_version_str(), features
                );

        if (version_only)
                exit(exitcode);

        fprintf(out, "\n"
                "General options:\n"
                "  -C | -P | -L | -Q  Mode: Consume, Produce, Metadata List, Query mode\n"
#if ENABLE_KAFKACONSUMER
                "  -G <group-id>      Mode: High-level KafkaConsumer (Kafka 0.9 balanced consumer groups)\n"
                "                     Expects a list of topics to subscribe to\n"
#endif
                "  -t <topic>         Topic to consume from, produce to, "
                "or list\n"
                "  -p <partition>     Partition\n"
                "  -b <brokers,..>    Bootstrap broker(s) (host[:port])\n"
                "  -D <delim>         Message delimiter character:\n"
                "                     a-z.. | \\r | \\n | \\t | \\xNN\n"
                "                     Default: \\n\n"
                "  -E                 Do not exit on non fatal error\n"
                "  -K <delim>         Key delimiter (same format as -D)\n"
                "  -c <cnt>           Limit message count\n"
                "  -X list            List available librdkafka configuration "
                "properties\n"
                "  -X prop=val        Set librdkafka configuration property.\n"
                "                     Properties prefixed with \"topic.\" are\n"
                "                     applied as topic properties.\n"
                "  -X dump            Dump configuration and exit.\n"
                "  -d <dbg1,...>      Enable librdkafka debugging:\n"
                "                     " RD_KAFKA_DEBUG_CONTEXTS "\n"
                "  -q                 Be quiet (verbosity set to 0)\n"
                "  -v                 Increase verbosity\n"
                "  -V                 Print version\n"
                "  -h                 Print usage help\n"
                "\n"
                "Producer options:\n"
                "  -z snappy|gzip     Message compression. Default: none\n"
                "  -p -1              Use random partitioner\n"
                "  -D <delim>         Delimiter to split input into messages\n"
                "  -K <delim>         Delimiter to split input key and message\n"
                "  -l                 Send messages from a file separated by\n"
                "                     delimiter, as with stdin.\n"
                "                     (only one file allowed)\n"
                "  -T                 Output sent messages to stdout, acting like tee.\n"
                "  -c <cnt>           Exit after producing this number "
                "of messages\n"
                "  -Z                 Send empty messages as NULL messages\n"
                "  file1 file2..      Read messages from files.\n"
                "                     With -l, only one file permitted.\n"
                "                     Otherwise, the entire file contents will\n"
                "                     be sent as one single message.\n"
                "\n"
                "Consumer options:\n"
                "  -o <offset>        Offset to start consuming from:\n"
                "                     beginning | end | stored |\n"
                "                     <value>  (absolute offset) |\n"
                "                     -<value> (relative offset from end)\n"
                "  -e                 Exit successfully when last message "
                "received\n"
                "  -f <fmt..>         Output formatting string, see below.\n"
                "                     Takes precedence over -D and -K.\n"
#if ENABLE_JSON
                "  -J                 Output with JSON envelope\n"
#endif
                "  -D <delim>         Delimiter to separate messages on output\n"
                "  -K <delim>         Print message keys prefixing the message\n"
                "                     with specified delimiter.\n"
                "  -O                 Print message offset using -K delimiter\n"
                "  -c <cnt>           Exit after consuming this number "
                "of messages\n"
                "  -Z                 Print NULL messages and keys as \"%s\" "
                "(instead of empty)\n"
                "  -u                 Unbuffered output\n"
                "\n"
                "Metadata options (-L):\n"
                "  -t <topic>         Topic to query (optional)\n"
                "\n"
                "Query options (-Q):\n"
                "  -t <t>:<p>:<ts>    Get offset for topic <t>,\n"
                "                     partition <p>, timestamp <ts>.\n"
                "                     Timestamp is the number of milliseconds\n"
                "                     since epoch UTC.\n"
                "                     Requires broker >= 0.10.0.0 and librdkafka >= 0.9.3.\n"
                "                     Multiple -t .. are allowed but a partition\n"
                "                     must only occur once.\n"
                "\n"
                "Format string tokens:\n"
                "  %%s                Message payload\n"
                "  %%S                Message payload length (or -1 for NULL)\n"
                "  %%R                Message payload length (or -1 for NULL) serialized\n"
                "                     as a binary big endian 32-bit signed integer\n"
                "  %%k                Message key\n"
                "  %%K                Message key length (or -1 for NULL)\n"
#if RD_KAFKA_VERSION >= 0x000902ff
                "  %%T                Message timestamp (milliseconds since epoch UTC)\n"
#endif
                "  %%t                Topic\n"
                "  %%p                Partition\n"
                "  %%o                Message offset\n"
                "  \\n \\r \\t          Newlines, tab\n"
                "  \\xXX \\xNNN        Any ASCII character\n"
                " Example:\n"
                "  -f 'Topic %%t [%%p] at offset %%o: key %%k: %%s\\n'\n"
                "\n"
                "\n"
                "Consumer mode (writes messages to stdout):\n"
                "  kafkacat -b <broker> -t <topic> -p <partition>\n"
                " or:\n"
                "  kafkacat -C -b ...\n"
                "\n"
#if ENABLE_KAFKACONSUMER
                "High-level KafkaConsumer mode:\n"
                "  kafkacat -b <broker> -G <group-id> topic1 top2 ^aregex\\d+\n"
                "\n"
#endif
                "Producer mode (reads messages from stdin):\n"
                "  ... | kafkacat -b <broker> -t <topic> -p <partition>\n"
                " or:\n"
                "  kafkacat -P -b ...\n"
                "\n"
                "Metadata listing:\n"
                "  kafkacat -L -b <broker> [-t <topic>]\n"
                "\n"
                "Query offset by timestamp:\n"
                "  kafkacat -Q -b broker -t <topic>:<partition>:<timestamp>\n"
                "\n",
                conf.null_str
                );
        exit(exitcode);
}


/**
 * Terminate by clearing the run flag.
 */
static void term (int sig) {
        conf.run = 0;
}


/**
 * librdkafka error callback
 */
static void error_cb (rd_kafka_t *rk, int err,
                      const char *reason, void *opaque) {
        if (err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) {
                ERROR("%s: %s", rd_kafka_err2str(err),
                      reason ? reason : "");
        } else {
                INFO(1, "ERROR: %s: %s\n", rd_kafka_err2str(err),
                     reason ? reason : "");
        }
}


/**
 * Parse delimiter string from command line arguments.
 */
static int parse_delim (const char *str) {
        int delim;

        if (!strncmp(str, "\\x", strlen("\\x")))
                delim = strtoul(str+strlen("\\x"), NULL, 16) & 0xff;
        else if (!strcmp(str, "\\n"))
                delim = (int)'\n';
        else if (!strcmp(str, "\\t"))
                delim = (int)'\t';
        else
                delim = (int)*str & 0xff;

        return delim;
}


/**
 * @brief Add topic+partition+offset to list, from :-separated string.
 *
 * "<topic>:<partition>:<offset_or_timestamp>"
 *
 * @remark Will modify \p str
 */
static void add_topparoff (const char *what,
                           rd_kafka_topic_partition_list_t *rktparlist,
                           char *str) {
        char *s, *t, *e;
        char *topic;
        int partition;
        int64_t offset;

        if (!(s = strchr(str, ':')) ||
            !(t = strchr(s+1, ':')))
                FATAL("%s: expected \"topic:partition:offset_or_timestamp\"",
                      what);

        topic = str;
        *s = '\0';

        partition = strtoul(s+1, &e, 0);
        if (e == s+1)
                FATAL("%s: expected \"topic:partition:offset_or_timestamp\"",
                      what);

        offset = strtoll(t+1, &e, 0);
        if (e == t+1)
                FATAL("%s: expected \"topic:partition:offset_or_timestamp\"",
                      what);

        rd_kafka_topic_partition_list_add(rktparlist, topic, partition)->
                offset = offset;
}


/**
 * Dump current rdkafka configuration to stdout.
 */
static void conf_dump (void) {
        const char **arr;
        size_t cnt;
        int pass;

        for (pass = 0 ; pass < 2 ; pass++) {
                int i;

                if (pass == 0) {
                        arr = rd_kafka_conf_dump(conf.rk_conf, &cnt);
                        printf("# Global config\n");
                } else {
                        printf("# Topic config\n");
                        arr = rd_kafka_topic_conf_dump(conf.rkt_conf, &cnt);
                }

                for (i = 0 ; i < (int)cnt ; i += 2)
                        printf("%s = %s\n", arr[i], arr[i+1]);

                printf("\n");

                rd_kafka_conf_dump_free(arr, cnt);
        }
}


/**
 * Parse command line arguments
 */
static void argparse (int argc, char **argv,
                      rd_kafka_topic_partition_list_t **rktparlistp) {
        char errstr[512];
        int opt;
        const char *fmt = NULL;
        const char *delim = "\n";
        const char *key_delim = NULL;
        char tmp_fmt[64];
        int conf_brokers_seen = 0;
        int do_conf_dump = 0;

        while ((opt = getopt(argc, argv,
                             "PCG:LQt:p:b:z:o:eED:K:Od:qvX:c:Tuf:ZlVh"
#if ENABLE_JSON
                             "J"
#endif
                     )) != -1) {
                switch (opt) {
                case 'P':
                case 'C':
                case 'L':
                case 'Q':
                        conf.mode = opt;
                        break;
#if ENABLE_KAFKACONSUMER
                case 'G':
                        conf.mode = opt;
                        conf.group = optarg;
                        if (rd_kafka_conf_set(conf.rk_conf, "group.id", optarg,
                                              errstr, sizeof(errstr)) !=
                            RD_KAFKA_CONF_OK)
                                FATAL("%s", errstr);
                        break;
#endif
                case 't':
                        if (conf.mode == 'Q') {
                                if (!*rktparlistp)
                                        *rktparlistp =
                                          rd_kafka_topic_partition_list_new(1);
                                add_topparoff("-t", *rktparlistp, optarg);
                        } else
                                conf.topic = optarg;
                        break;
                case 'p':
                        conf.partition = atoi(optarg);
                        break;
                case 'b':
                        conf.brokers = optarg;
                        conf_brokers_seen++;
                        break;
                case 'z':
                        if (rd_kafka_conf_set(conf.rk_conf,
                                              "compression.codec", optarg,
                                              errstr, sizeof(errstr)) !=
                            RD_KAFKA_CONF_OK)
                                FATAL("%s", errstr);
                        break;
                case 'o':
                        if (!strcmp(optarg, "end"))
                                conf.offset = RD_KAFKA_OFFSET_END;
                        else if (!strcmp(optarg, "beginning"))
                                conf.offset = RD_KAFKA_OFFSET_BEGINNING;
                        else if (!strcmp(optarg, "stored"))
                                conf.offset = RD_KAFKA_OFFSET_STORED;
                        else {
                                conf.offset = strtoll(optarg, NULL, 10);
                                if (conf.offset < 0)
                                        conf.offset =
                                          RD_KAFKA_OFFSET_TAIL(-conf.offset);
                        }
                        break;
                case 'e':
                        conf.exit_eof = 1;
                        break;
                case 'E':
                        conf.exitonerror = 0;
                        break;
                case 'f':
                        fmt = optarg;
                        break;
#if ENABLE_JSON
                case 'J':
                        conf.flags |= CONF_F_FMT_JSON;
                        break;
#endif
                case 'D':
                        delim = optarg;
                        break;
                case 'K':
                        key_delim = optarg;
                        conf.flags |= CONF_F_KEY_DELIM;
                        break;
                case 'l':
                        conf.flags |= CONF_F_LINE;
                        break;
                case 'O':
                        conf.flags |= CONF_F_OFFSET;
                        break;
                case 'c':
                        conf.msg_cnt = strtoll(optarg, NULL, 10);
                        break;
                case 'Z':
                        conf.flags |= CONF_F_NULL;
                        conf.null_str_len = strlen(conf.null_str);
                        break;
                case 'd':
                        conf.debug = optarg;
                        if (rd_kafka_conf_set(conf.rk_conf, "debug", conf.debug,
                                              errstr, sizeof(errstr)) !=
                            RD_KAFKA_CONF_OK)
                                FATAL("%s", errstr);
                        break;
                case 'q':
                        conf.verbosity = 0;
                        break;
                case 'v':
                        conf.verbosity++;
                        break;
                case 'T':
                        conf.flags |= CONF_F_TEE;
                        break;
                case 'u':
                        setbuf(stdout, NULL);
                        break;
                case 'X':
                {
                        char *name, *val;
                        rd_kafka_conf_res_t res;

                        if (!strcmp(optarg,
"list") || !strcmp(optarg, "help")) { rd_kafka_conf_properties_show(stdout); exit(0); } if (!strcmp(optarg, "dump")) { do_conf_dump = 1; continue; } name = optarg; if (!(val = strchr(name, '='))) { fprintf(stderr, "%% Expected " "-X property=value, not %s, " "use -X list to display available " "properties\n", name); exit(1); } *val = '\0'; val++; if (!strcmp(name, "metadata.broker.list") || !strcmp(name, "bootstrap.servers")) conf_brokers_seen++; res = RD_KAFKA_CONF_UNKNOWN; /* Try "topic." prefixed properties on topic * conf first, and then fall through to global if * it didnt match a topic configuration property. */ if (!strncmp(name, "topic.", strlen("topic."))) res = rd_kafka_topic_conf_set(conf.rkt_conf, name+ strlen("topic."), val, errstr, sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN) { res = rd_kafka_conf_set(conf.rk_conf, name, val, errstr, sizeof(errstr)); } if (res != RD_KAFKA_CONF_OK) FATAL("%s", errstr); /* Interception */ #if RD_KAFKA_VERSION >= 0x00090000 if (!strcmp(name, "quota.support.enable")) rd_kafka_conf_set_throttle_cb(conf.rk_conf, throttle_cb); #endif if (!strcmp(name, "api.version.request")) conf.flags |= CONF_F_APIVERREQ_USER; } break; case 'V': usage(argv[0], 0, NULL, 1); break; case 'h': usage(argv[0], 0, NULL, 0); break; default: usage(argv[0], 1, "unknown argument", 0); break; } } /* Dump configuration and exit, if so desired. */ if (do_conf_dump) { conf_dump(); exit(0); } if (!conf_brokers_seen) usage(argv[0], 1, "-b missing", 0); /* Decide mode if not specified */ if (!conf.mode) { if (_COMPAT(isatty)(STDIN_FILENO)) conf.mode = 'C'; else conf.mode = 'P'; INFO(1, "Auto-selecting %s mode (use -P or -C to override)\n", conf.mode == 'C' ? "Consumer":"Producer"); } if (!strchr("GLQ", conf.mode) && !conf.topic) usage(argv[0], 1, "-t missing", 0); else if (conf.mode == 'Q' && !*rktparlistp) usage(argv[0], 1, "-t :: missing", 0); if (conf.brokers && rd_kafka_conf_set(conf.rk_conf, "metadata.broker.list", conf.brokers, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) usage(argv[0], 1, errstr, 0); rd_kafka_conf_set_error_cb(conf.rk_conf, error_cb); fmt_init(); if (strchr("GC", conf.mode)) { if (!fmt) { if ((conf.flags & CONF_F_FMT_JSON)) { /* For JSON the format string is simply the * output object delimiter (e.g., newline). */ fmt = delim; } else { if (key_delim) snprintf(tmp_fmt, sizeof(tmp_fmt), "%%k%s%%s%s", key_delim, delim); else snprintf(tmp_fmt, sizeof(tmp_fmt), "%%s%s", delim); fmt = tmp_fmt; } } fmt_parse(fmt); } else if (conf.mode == 'P') { conf.delim = parse_delim(delim); if (conf.flags & CONF_F_KEY_DELIM) conf.key_delim = parse_delim(key_delim); } /* Automatically enable API version requests if needed and * user hasn't explicitly configured it (in any way). */ if ((conf.flags & (CONF_F_APIVERREQ | CONF_F_APIVERREQ_USER)) == CONF_F_APIVERREQ) { INFO(2, "Automatically enabling api.version.request=true\n"); rd_kafka_conf_set(conf.rk_conf, "api.version.request", "true", NULL, 0); } } int main (int argc, char **argv) { #ifdef SIGIO char tmp[16]; #endif FILE *in = stdin; struct timeval tv; rd_kafka_topic_partition_list_t *rktparlist = NULL; signal(SIGINT, term); signal(SIGTERM, term); #ifdef SIGPIPE signal(SIGPIPE, term); #endif /* Seed rng for random partitioner, jitter, etc. 
*/ rd_gettimeofday(&tv, NULL); srand(tv.tv_usec); /* Create config containers */ conf.rk_conf = rd_kafka_conf_new(); conf.rkt_conf = rd_kafka_topic_conf_new(); /* * Default config */ #ifdef SIGIO /* Enable quick termination of librdkafka */ snprintf(tmp, sizeof(tmp), "%i", SIGIO); rd_kafka_conf_set(conf.rk_conf, "internal.termination.signal", tmp, NULL, 0); #endif /* Log callback */ rd_kafka_conf_set_log_cb(conf.rk_conf, rd_kafka_log_print); /* Parse command line arguments */ argparse(argc, argv, &rktparlist); if (optind < argc) { if (!strchr("PG", conf.mode)) usage(argv[0], 1, "file/topic list only allowed in " "producer(-P)/kafkaconsumer(-G) mode", 0); else if ((conf.flags & CONF_F_LINE) && argc - optind > 1) FATAL("Only one file allowed for line mode (-l)"); else if (conf.flags & CONF_F_LINE) { in = fopen(argv[optind], "r"); if (in == NULL) FATAL("Cannot open %s: %s", argv[optind], strerror(errno)); } } /* Run according to mode */ switch (conf.mode) { case 'C': consumer_run(stdout); break; #if ENABLE_KAFKACONSUMER case 'G': kafkaconsumer_run(stdout, &argv[optind], argc-optind); break; #endif case 'P': producer_run(in, &argv[optind], argc-optind); break; case 'L': metadata_list(); break; case 'Q': if (!rktparlist) usage(argv[0], 1, "-Q requires one or more " "-t ::", 0); query_offsets_by_time(rktparlist); rd_kafka_topic_partition_list_destroy(rktparlist); break; default: usage(argv[0], 0, NULL, 0); break; } if (in != stdin) fclose(in); rd_kafka_wait_destroyed(5000); fmt_term(); exit(conf.exitcode); } kafkacat-1.3.1/kafkacat.h000066400000000000000000000107711307612056000152050ustar00rootroot00000000000000/* * kafkacat - Apache Kafka consumer and producer * * Copyright (c) 2015, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */
#pragma once

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <inttypes.h>

#include <librdkafka/rdkafka.h>

#include "rdport.h"

#ifdef _MSC_VER
#pragma comment(lib, "librdkafka.lib")
#include "win32/win32_config.h"
#else
#include "config.h"
#endif

typedef enum {
        KC_FMT_STR,
        KC_FMT_OFFSET,
        KC_FMT_KEY,
        KC_FMT_KEY_LEN,
        KC_FMT_PAYLOAD,
        KC_FMT_PAYLOAD_LEN,
        KC_FMT_PAYLOAD_LEN_BINARY,
        KC_FMT_TOPIC,
        KC_FMT_PARTITION,
        KC_FMT_TIMESTAMP
} fmt_type_t;

#define KC_FMT_MAX_SIZE 128

struct conf {
        int     run;
        int     verbosity;
        int     exitcode;
        int     exitonerror;
        char    mode;
        int     flags;
#define CONF_F_FMT_JSON       0x1  /* JSON formatting */
#define CONF_F_KEY_DELIM      0x2  /* Producer: use key delimiter */
#define CONF_F_OFFSET         0x4  /* Print offsets */
#define CONF_F_TEE            0x8  /* Tee output when producing */
#define CONF_F_NULL           0x10 /* Send empty messages as NULL messages */
#define CONF_F_LINE           0x20 /* Read files in line mode when producing */
#define CONF_F_APIVERREQ      0x40 /* Enable api.version.request=true */
#define CONF_F_APIVERREQ_USER 0x80 /* User set api.version.request */
        int     delim;
        int     key_delim;

        struct {
                fmt_type_t type;
                const char *str;
                int str_len;
        } fmt[KC_FMT_MAX_SIZE];
        int     fmt_cnt;

        int     msg_size;
        char   *brokers;
        char   *topic;
        int32_t partition;
        char   *group;
        int64_t offset;
        int     exit_eof;
        int64_t msg_cnt;
        char   *null_str;
        int     null_str_len;

        rd_kafka_conf_t       *rk_conf;
        rd_kafka_topic_conf_t *rkt_conf;

        rd_kafka_t       *rk;
        rd_kafka_topic_t *rkt;

        char *debug;
};

extern struct conf conf;


void RD_NORETURN fatal0 (const char *func, int line,
                         const char *fmt, ...);

void error0 (int erroronexit, const char *func, int line,
             const char *fmt, ...);

#define FATAL(.../*fmt*/)  fatal0(__FUNCTION__, __LINE__, __VA_ARGS__)

#define ERROR(.../*fmt*/)  error0(conf.exitonerror, __FUNCTION__, __LINE__, \
                                  __VA_ARGS__)

/* Info printout */
#define INFO(VERBLVL,.../*fmt*/) do {                        \
                if (conf.verbosity >= (VERBLVL))             \
                        fprintf(stderr, "%% " __VA_ARGS__);  \
        } while (0)


/*
 * format.c
 */
void fmt_msg_output (FILE *fp, const rd_kafka_message_t *rkmessage);

void fmt_parse (const char *fmt);

void fmt_init (void);
void fmt_term (void);


#if ENABLE_JSON
/*
 * json.c
 */
void fmt_msg_output_json (FILE *fp, const rd_kafka_message_t *rkmessage);

void metadata_print_json (const struct rd_kafka_metadata *metadata);

void partition_list_print_json (const rd_kafka_topic_partition_list_t *parts,
                                void *json_gen);

void fmt_init_json (void);
void fmt_term_json (void);
#endif

/*
 * tools.c
 */
int query_offsets_by_time (rd_kafka_topic_partition_list_t *offsets);
kafkacat-1.3.1/mklove/000077500000000000000000000000001307612056000145565ustar00rootroot00000000000000kafkacat-1.3.1/mklove/Makefile.base000077500000000000000000000117521307612056000171360ustar00rootroot00000000000000# Base Makefile providing various standard targets
# Part of mklove suite but may be used independently.

MKL_RED?=       \033[031m
MKL_GREEN?=     \033[032m
MKL_YELLOW?=    \033[033m
MKL_BLUE?=      \033[034m
MKL_CLR_RESET?= \033[0m

DEPS=           $(OBJS:%.o=%.d)

# TOPDIR is "TOPDIR/mklove/../" i.e., TOPDIR.
# We do it with two dir calls instead of /.. to support mklove being symlinked.
MKLOVE_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
TOPDIR = $(MKLOVE_DIR:mklove/=.)
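# (Editor's illustration: when this file is included as mklove/Makefile.base
#  from a project's top-level Makefile, $(lastword $(MAKEFILE_LIST)) is
#  "mklove/Makefile.base", so MKLOVE_DIR expands to "mklove/" and the
#  substitution above yields TOPDIR=".".)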
# Convert LIBNAME ("libxyz") to "xyz" LIBNAME0=$(LIBNAME:lib%=%) # Silence lousy default ARFLAGS (rv) ARFLAGS= ifndef MKL_MAKEFILE_CONFIG -include $(TOPDIR)/Makefile.config endif _UNAME_S := $(shell uname -s) ifeq ($(_UNAME_S),Darwin) LIBFILENAME=$(LIBNAME).$(LIBVER).dylib LIBFILENAMELINK=$(LIBNAME).dylib else LIBFILENAME=$(LIBNAME).so.$(LIBVER) LIBFILENAMELINK=$(LIBNAME).so endif INSTALL?= install INSTALL_PROGRAM?= $(INSTALL) INSTALL_DATA?= $(INSTALL) -m 644 prefix?= /usr/local exec_prefix?= $(prefix) bindir?= $(exec_prefix)/bin sbindir?= $(exec_prefix)/sbin libexecdir?= $(exec_prefix)/libexec/ # append PKGNAME on install datarootdir?= $(prefix)/share datadir?= $(datarootdir) # append PKGNAME on install sysconfdir?= $(prefix)/etc sharedstatedir?=$(prefix)/com localestatedir?=$(prefix)/var runstatedir?= $(localestatedir)/run includedir?= $(prefix)/include docdir?= $(datarootdir)/doc/$(PKGNAME) infodir?= $(datarootdir)/info libdir?= $(prefix)/lib localedir?= $(datarootdir)/locale pkgconfigdir?= $(libdir)/pkgconfig mandir?= $(datarootdir)/man man1dir?= $(mandir)/man1 man2dir?= $(mandir)/man2 man3dir?= $(mandir)/man3 man4dir?= $(mandir)/man4 man5dir?= $(mandir)/man5 man6dir?= $(mandir)/man6 man7dir?= $(mandir)/man7 man8dir?= $(mandir)/man8 # Checks that mklove is set up and ready for building mklove-check: @if [ ! -f "$(TOPDIR)/Makefile.config" ]; then \ printf "$(MKL_RED)$(TOPDIR)/Makefile.config missing: please run ./configure$(MKL_CLR_RESET)\n" ; \ exit 1 ; \ fi %.o: %.c $(CC) -MD -MP $(CPPFLAGS) $(CFLAGS) -c $< -o $@ %.o: %.cpp $(CXX) -MD -MP $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@ lib: $(LIBFILENAME) $(LIBNAME).a $(LIBFILENAMELINK) lib-gen-pkg-config $(LIBFILENAME): $(OBJS) $(LIBNAME).lds @printf "$(MKL_YELLOW)Creating shared library $@$(MKL_CLR_RESET)\n" $(CC) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS) $(LIBNAME).a: $(OBJS) @printf "$(MKL_YELLOW)Creating static library $@$(MKL_CLR_RESET)\n" $(AR) rcs$(ARFLAGS) $@ $(OBJS) $(LIBFILENAMELINK): $(LIBFILENAME) @printf "$(MKL_YELLOW)Creating $@ symlink$(MKL_CLR_RESET)\n" rm -f "$@" && ln -s "$^" "$@" # pkg-config .pc file definition ifeq ($(GEN_PKG_CONFIG),y) define _PKG_CONFIG_DEF prefix=$(prefix) libdir=$(libdir) includedir=$(includedir) Name: $(LIBNAME) Description: $(MKL_APP_DESC_ONELINE) Version: $(MKL_APP_VERSION) Cflags: -I$${includedir} Libs: -L$${libdir} -l$(LIBNAME0) Libs.private: $(patsubst -L%,,$(LIBS)) endef export _PKG_CONFIG_DEF $(LIBNAME0).pc: ../Makefile.config @printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n" @echo "$$_PKG_CONFIG_DEF" > $@ lib-gen-pkg-config: $(LIBNAME0).pc lib-clean-pkg-config: rm -f $(LIBNAME0).pc else lib-gen-pkg-config: lib-clean-pkg-config: endif $(BIN): $(OBJS) @printf "$(MKL_YELLOW)Creating program $@$(MKL_CLR_RESET)\n" $(CC) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS) file-check: @printf "$(MKL_YELLOW)Checking $(LIBNAME) integrity$(MKL_CLR_RESET)\n" @RET=true ; \ for f in $(CHECK_FILES) ; do \ printf "%-30s " $$f ; \ if [ -f "$$f" ]; then \ printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n" ; \ else \ printf "$(MKL_RED)MISSING$(MKL_CLR_RESET)\n" ; \ RET=false ; \ fi ; \ done ; \ $$RET lib-install: @printf "$(MKL_YELLOW)Install $(LIBNAME) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" $(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME) ; \ $(INSTALL) -d $$DESTDIR$(libdir) ; \ $(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME) ; \ $(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir) ; \ $(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir) ; \ [ -f "$(LIBNAME0).pc" ] && ( \ $(INSTALL) -d $$DESTDIR$(pkgconfigdir) ; 
\ $(INSTALL) -m 0644 $(LIBNAME0).pc $$DESTDIR$(pkgconfigdir) \ ) ; \ (cd $$DESTDIR$(libdir) && ln -sf $(LIBFILENAME) $(LIBFILENAMELINK)) lib-uninstall: @printf "$(MKL_YELLOW)Uninstall $(LIBNAME) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" for hdr in $(HDRS) ; do \ rm -f $$DESTDIR$(includedir)/$(PKGNAME)/$$hdr ; done rm -f $$DESTDIR$(libdir)/$(LIBNAME).a rm -f $$DESTDIR$(libdir)/$(LIBFILENAME) rm -f $$DESTDIR$(libdir)/$(LIBFILENAMELINK) rmdir $$DESTDIR$(includedir)/$(PKGNAME) || true bin-install: @printf "$(MKL_YELLOW)Install $(BIN) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" $(INSTALL) -d $$DESTDIR$(bindir) && \ $(INSTALL) $(BIN) $$DESTDIR$(bindir) bin-uninstall: @printf "$(MKL_YELLOW)Uninstall $(BIN) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" rm -f $$DESTDIR$(bindir)/$(BIN) generic-clean: rm -f $(OBJS) $(DEPS) lib-clean: generic-clean lib-clean-pkg-config rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMELINK) \ $(LIBNAME).lds bin-clean: generic-clean rm -f $(BIN) kafkacat-1.3.1/mklove/modules/000077500000000000000000000000001307612056000162265ustar00rootroot00000000000000kafkacat-1.3.1/mklove/modules/configure.base000066400000000000000000001212741307612056000210520ustar00rootroot00000000000000#!/bin/bash # # # mklove base configure module, implements the mklove configure framework # MKL_MODULES="base" MKL_CACHEVARS="" MKL_MKVARS="" MKL_DEFINES="" MKL_CHECKS="" MKL_LOAD_STACK="" MKL_IDNEXT=1 MKL_OUTMK=_mklout.mk MKL_OUTH=_mklout.h MKL_OUTDBG=config.log MKL_GENERATORS="base:mkl_generate_late_vars" MKL_CLEANERS="" MKL_FAILS="" MKL_LATE_VARS="" MKL_OPTS_SET="" MKL_RED="" MKL_GREEN="" MKL_YELLOW="" MKL_BLUE="" MKL_CLR_RESET="" MKL_NO_DOWNLOAD=0 if [[ -z "$MKL_REPO_URL" ]]; then MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master" fi # Default mklove directory to PWD/mklove [[ -z "$MKLOVE_DIR" ]] && MKLOVE_DIR=mklove ########################################################################### # # Variable types: # env - Standard environment variables. # var - mklove runtime variable, cached or not. # mkvar - Makefile variables, also sets runvar # define - config.h variables/defines # ########################################################################### # Low level variable assignment # Arguments: # variable name # variable value function mkl_var0_set { export "$1"="$2" } # Sets a runtime variable (only used during configure) # If cache=1 these variables are cached to config.cache. # Arguments: # variable name # variable value # [ "cache" ] function mkl_var_set { mkl_var0_set "$1" "$2" if [[ $3 == "cache" ]]; then if ! mkl_in_list "$MKL_CACHEVARS" "$1" ; then MKL_CACHEVARS="$MKL_CACHEVARS $1" fi fi } # Unsets a mkl variable # Arguments: # variable name function mkl_var_unset { unset $1 } # Appends to a mkl variable (space delimited) # Arguments: # variable name # variable value function mkl_var_append { if [[ -z ${!1} ]]; then mkl_var_set "$1" "$2" else mkl_var0_set "$1" "${!1} $2" fi } # Prepends to a mkl variable (space delimited) # Arguments: # variable name # variable value function mkl_var_prepend { if [[ -z ${!1} ]]; then mkl_var_set "$1" "$2" else mkl_var0_set "$1" "$2 ${!1}" fi } # Shift the first word off a variable. 
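# Example (editor's note, mirroring mkl_require further below): modules are
# pushed onto the load stack and popped again with
#   mkl_var_prepend MKL_LOAD_STACK "$mod"
#   ...
#   mkl_var_shift MKL_LOAD_STACK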
# Arguments: # variable name function mkl_var_shift { local n="${!1}" mkl_var0_set "$1" "${n#* }" return 0 } # Returns the contents of mkl variable # Arguments: # variable name function mkl_var_get { echo "${!1}" } # Set environment variable (runtime) # These variables are not cached nor written to any of the output files, # its just simply a helper wrapper for standard envs. # Arguments: # varname # varvalue function mkl_env_set { mkl_var0_set "$1" "$2" } # Append to environment variable # Arguments: # varname # varvalue # [ separator (" ") ] function mkl_env_append { local sep=" " if [[ -z ${!1} ]]; then mkl_env_set "$1" "$2" else [ ! -z ${3} ] && sep="$3" mkl_var0_set "$1" "${!1}${sep}$2" fi } # Prepend to environment variable # Arguments: # varname # varvalue # [ separator (" ") ] function mkl_env_prepend { local sep=" " if [[ -z ${!1} ]]; then mkl_env_set "$1" "$2" else [ ! -z ${3} ] && sep="$3" mkl_var0_set "$1" "${2}${sep}${!1}" fi } # Set a make variable (Makefile.config) # Arguments: # config name # variable name # value function mkl_mkvar_set { if [[ ! -z $2 ]]; then mkl_env_set "$2" "$3" mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2 fi } # Appends to a make variable (Makefile.config) # Arguments: # config name # variable name # value function mkl_mkvar_append { if [[ ! -z $2 ]]; then mkl_env_append "$2" "$3" mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2 fi } # Prepends to a make variable (Makefile.config) # Arguments: # config name # variable name # value function mkl_mkvar_prepend { if [[ ! -z $2 ]]; then mkl_env_prepend "$2" "$3" mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2 fi } # Return mkvar variable value # Arguments: # variable name function mkl_mkvar_get { [[ -z ${!1} ]] && return 1 echo ${!1} return 0 } # Defines a config header define (config.h) # Arguments: # config name # define name # define value (optional, default: 1) # if value starts with code: then no "" are added function mkl_define_set { if [[ -z $2 ]]; then return 0 fi local stmt="" local defid= if [[ $2 = *\(* ]]; then # macro defid="def_${2%%(*}" else # define defid="def_$2" fi [[ -z $1 ]] || stmt="// $1\n" local val="$3" if [[ -z "$val" ]]; then val="$(mkl_def $2 1)" fi # Define as code, string or integer? if [[ $val == code:* ]]; then # Code block, copy verbatim without quotes, strip code: prefix val=${val#code:} elif [[ ! ( "$val" =~ ^[0-9]+([lL]?[lL][dDuU]?)?$ || \ "$val" =~ ^0x[0-9a-fA-F]+([lL]?[lL][dDuU]?)?$ ) ]]; then # String: quote val="\"$val\"" fi # else: unquoted integer/hex stmt="${stmt}#define $2 $val" mkl_env_set "$defid" "$stmt" mkl_env_append MKL_DEFINES "$defid" } # Sets "all" configuration variables, that is: # for name set: Makefile variable, config.h define # Will convert value "y"|"n" to 1|0 for config.h # Arguments: # config name # variable name # value function mkl_allvar_set { mkl_mkvar_set "$1" "$2" "$3" local val=$3 if [[ $3 = "y" ]]; then val=1 elif [[ $3 = "n" ]]; then val=0 fi mkl_define_set "$1" "$2" "$val" } ########################################################################### # # # Check failure functionality # # ########################################################################### # Summarize all fatal failures and then exits. 
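# Example (editor's note; "zlib"/-lz are hypothetical): a check registered
# with the "fail" action, such as
#   mkl_lib_check "zlib" "WITH_ZLIB" fail CC "-lz" "#include <zlib.h>"
# does not abort configure immediately; the failure is recorded via mkl_fail
# and reported here once all checks have run.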
function mkl_fail_summary { echo " " local pkg_cmd="" local install_pkgs="" mkl_err "###########################################################" mkl_err "### Configure failed ###" mkl_err "###########################################################" mkl_err "### Accumulated failures: ###" mkl_err "###########################################################" local n for n in $MKL_FAILS ; do local conf=$(mkl_var_get MKL_FAIL__${n}__conf) mkl_err " $conf ($(mkl_var_get MKL_FAIL__${n}__define)) $(mkl_meta_get $conf name)" if mkl_meta_exists $conf desc; then mkl_err0 " desc: $MKL_YELLOW$(mkl_meta_get $conf desc)$MKL_CLR_RESET" fi mkl_err0 " module: $(mkl_var_get MKL_FAIL__${n}__module)" mkl_err0 " action: $(mkl_var_get MKL_FAIL__${n}__action)" mkl_err0 " reason: $(mkl_var_get MKL_FAIL__${n}__reason) " # Dig up some metadata to assist the user case $MKL_DISTRO in Debian|Ubuntu|*) local debs=$(mkl_meta_get $conf "deb") pkg_cmd="sudo apt-get install" if [[ ${#debs} > 0 ]]; then install_pkgs="$install_pkgs $debs" fi ;; esac done if [[ ! -z $install_pkgs ]]; then mkl_err "###########################################################" mkl_err "### Installing the following packages might help: ###" mkl_err "###########################################################" mkl_err0 "$pkg_cmd $install_pkgs" mkl_err0 "" fi exit 1 } # Checks if there were failures. # Returns 0 if there were no failures, else calls failure summary and exits. function mkl_check_fails { if [[ ${#MKL_FAILS} = 0 ]]; then return 0 fi mkl_fail_summary } # A check has failed but we want to carry on (and we should!). # We fail it all later. # Arguments: # config name # define name # action # reason function mkl_fail { local n="$(mkl_env_esc "$1")" mkl_var_set "MKL_FAIL__${n}__conf" "$1" mkl_var_set "MKL_FAIL__${n}__module" $MKL_MODULE mkl_var_set "MKL_FAIL__${n}__define" $2 mkl_var_set "MKL_FAIL__${n}__action" "$3" if [[ -z $(mkl_var_get "MKL_FAIL__${n}__reason") ]]; then mkl_var_set "MKL_FAIL__${n}__reason" "$4" else mkl_var_append "MKL_FAIL__${n}__reason" " And also: $4" fi mkl_in_list "$MKL_FAILS" "$n" || mkl_var_append MKL_FAILS "$n" } # A check failed, handle it # Arguments: # config name # define name # action (fail|disable|ignore|cont) # reason function mkl_check_failed { # Override action based on require directives, unless the action is # set to cont (for fallthrough to sub-sequent tests). local action="$3" if [[ $3 != "cont" ]]; then action=$(mkl_meta_get "MOD__$MKL_MODULE" "override_action" $3) fi # --fail-fatal option [[ $MKL_FAILFATAL ]] && action="fail" mkl_check_done "$1" "$2" "$action" "failed" mkl_dbg "Check $1 ($2, action $action (originally $3)) failed: $4" case $action in fail) # Check failed fatally, fail everything eventually mkl_fail "$1" "$2" "$3" "$4$extra" return 1 ;; disable) # Check failed, disable [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "n" return 1 ;; ignore) # Check failed but we ignore the results and set it anyway. [[ ! -z $2 ]] && mkl_define_set "$1" "$2" "1" [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "y" return 1 ;; cont) # Check failed but we ignore the results and do nothing. return 0 ;; esac } ########################################################################### # # # Output generators # # ########################################################################### # Generate late variables. # Late variables are those referenced in command line option defaults # but then never set by --option. 
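# Example (editor's note, see configure.builtin below):
#   mkl_option "Standard" exec_prefix "--exec-prefix=PATH" \
#              "Install arch-dependent files in PATH" "\$prefix"
# declares a default that references $prefix; if --exec-prefix is never given
# on the command line, opt_exec_prefix is invoked from here with the expanded
# value of $prefix once all other options have been set.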
function mkl_generate_late_vars { local n for n in $MKL_LATE_VARS ; do local func=${n%:*} local safeopt=${func#opt_} local val=${n#*:} if mkl_in_list "$MKL_OPTS_SET" "$safeopt" ; then # Skip options set explicitly with --option continue fi # Expand variable references "\$foo" by calling eval # and pass it opt_... function. $func "$(eval echo $val)" done } # Generate output files. # Must be called following a succesful configure run. function mkl_generate { local mf= for mf in $MKL_GENERATORS ; do MKL_MODULE=${mf%:*} local func=${mf#*:} $func || exit 1 done mkl_write_mk "# Automatically generated by $0 $*" mkl_write_mk "# Config variables" mkl_write_mk "#" mkl_write_mk "# Generated by:" mkl_write_mk "# $MKL_CONFIGURE_ARGS" mkl_write_mk "" # This variable is used by Makefile.base to avoid multiple inclusions. mkl_write_mk "MKL_MAKEFILE_CONFIG=y" # Export colors to Makefile.config mkl_write_mk "MKL_RED=\t${MKL_RED}" mkl_write_mk "MKL_GREEN=\t${MKL_GREEN}" mkl_write_mk "MKL_YELLOW=\t${MKL_YELLOW}" mkl_write_mk "MKL_BLUE=\t${MKL_BLUE}" mkl_write_mk "MKL_CLR_RESET=\t${MKL_CLR_RESET}" local n= for n in $MKL_MKVARS ; do # Some special variables should be prefixable by the caller, so # define them in the makefile as appends. local op="=" case $n in CFLAGS|CPPFLAGS|CXXFLAGS|LDFLAGS|LIBS) op="+=" ;; esac mkl_write_mk "$n$op\t${!n}" done mkl_write_mk "# End of config variables" MKL_OUTMK_FINAL=Makefile.config mv $MKL_OUTMK $MKL_OUTMK_FINAL echo "Generated $MKL_OUTMK_FINAL" # Generate config.h mkl_write_h "// Automatically generated by $0 $*" mkl_write_h "#pragma once" for n in $MKL_DEFINES ; do mkl_write_h "${!n}" done MKL_OUTH_FINAL=config.h mv $MKL_OUTH $MKL_OUTH_FINAL echo "Generated $MKL_OUTH_FINAL" } # Remove file noisily, if it exists function mkl_rm { if [[ -f $fname ]]; then echo "Removing $fname" rm -f "$fname" fi } # Remove files generated by configure function mkl_clean { for fname in Makefile.config config.h config.cache config.log ; do mkl_rm "$fname" done local mf= for mf in $MKL_CLEANERS ; do MKL_MODULE=${mf%:*} local func=${mf#*:} $func || exit 1 done } # Print summary of succesful configure run function mkl_summary { echo " Configuration summary:" local n= for n in $MKL_MKVARS ; do # Skip the boring booleans if [[ $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then continue fi printf " %-24s %s\n" "$n" "${!n}" done } # Write to mk file # Argument: # string .. function mkl_write_mk { echo -e "$*" >> $MKL_OUTMK } # Write to header file # Argument: # string .. function mkl_write_h { echo -e "$*" >> $MKL_OUTH } ########################################################################### # # # Logging and debugging # # ########################################################################### # Debug print # Only visible on terminal if MKL_DEBUG is set. # Always written to config.log # Argument: # string .. function mkl_dbg { if [[ ! -z $MKL_DEBUG ]]; then echo -e "${MKL_BLUE}DBG:$$: $*${MKL_CLR_RESET}" 1>&2 fi echo "DBG: $*" >> $MKL_OUTDBG } # Error print (with color) # Always printed to terminal and config.log # Argument: # string .. function mkl_err { echo -e "${MKL_RED}$*${MKL_CLR_RESET}" 1>&2 echo "$*" >> $MKL_OUTDBG } # Same as mkl_err but without coloring # Argument: # string .. function mkl_err0 { echo -e "$*" 1>&2 echo "$*" >> $MKL_OUTDBG } # Standard print # Always printed to terminal and config.log # Argument: # string .. 
function mkl_info { echo -e "$*" 1>&2 echo -e "$*" >> $MKL_OUTDBG } ########################################################################### # # # Misc helpers # # ########################################################################### # Returns the absolute path (but not necesarily canonical) of the first argument function mkl_abspath { echo $1 | sed -e "s|^\([^/]\)|$PWD/\1|" } # Returns true (0) if function $1 exists, else false (1) function mkl_func_exists { declare -f "$1" > /dev/null return $? } # Rename function. # Returns 0 on success or 1 if old function (origname) was not defined. # Arguments: # origname # newname function mkl_func_rename { if ! mkl_func_exists $1 ; then return 1 fi local orig=$(declare -f $1) local new="$2${orig#$1}" eval "$new" unset -f "$1" return 0 } # Push module function for later call by mklove. # The function is renamed to an internal name. # Arguments: # list variable name # module name # function name function mkl_func_push { local newfunc="__mkl__f_${2}_$(( MKL_IDNEXT++ ))" if mkl_func_rename "$3" "$newfunc" ; then mkl_var_append "$1" "$2:$newfunc" fi } # Returns value, or the default string if value is empty. # Arguments: # value # default function mkl_def { if [[ ! -z $1 ]]; then echo $1 else echo $2 fi } # Render a string (e.g., evaluate its $varrefs) # Arguments: # string function mkl_render { if [[ $* == *\$* ]]; then eval "echo $*" else echo "$*" fi } # Escape a string so that it becomes suitable for being an env variable. # This is a destructive operation and the original string cannot be restored. function mkl_env_esc { echo $* | LC_ALL=C sed -e 's/[^a-zA-Z0-9_]/_/g' } # Convert arguments to upper case function mkl_upper { echo "$*" | tr '[:lower:]' '[:upper:]' } # Convert arguments to lower case function mkl_lower { echo "$*" | tr '[:upper:]' '[:lower:]' } # Checks if element is in list # Arguments: # list # element function mkl_in_list { local n for n in $1 ; do [[ $n == $2 ]] && return 0 done return 1 } ########################################################################### # # # Cache functionality # # ########################################################################### # Write cache file function mkl_cache_write { [[ ! -z "$MKL_NOCACHE" ]] && return 0 echo "# mklove configure cache file generated at $(date)" > config.cache for n in $MKL_CACHEVARS ; do echo "$n=${!n}" >> config.cache done echo "Generated config.cache" } # Read cache file function mkl_cache_read { [[ ! -z "$MKL_NOCACHE" ]] && return 0 [ -f config.cache ] || return 1 echo "using cache file config.cache" local ORIG_IFS=$IFS IFS="$IFS=" while read -r n v ; do [[ -z $n || $n = \#* || -z $v ]] && continue mkl_var_set $n $v cache done < config.cache IFS=$ORIG_IFS } ########################################################################### # # # Config name meta data # # ########################################################################### # Set metadata for config name # This metadata is used by mkl in various situations # Arguments: # config name # metadata key # metadata value (appended) function mkl_meta_set { local metaname="mkl__$1__$2" eval "$metaname=\"\$$metaname $3\"" } # Returns metadata for config name # Arguments: # config name # metadata key # default (optional) function mkl_meta_get { local metaname="mkl__$1__$2" if [[ ! -z ${!metaname} ]]; then echo ${!metaname} else echo "$3" fi } # Checks if metadata exists # Arguments: # config name # metadata key function mkl_meta_exists { local metaname="mkl__$1__$2" if [[ ! 
-z ${!metaname} ]]; then return 0 else return 1 fi } ########################################################################### # # # Check framework # # ########################################################################### # Print that a check is beginning to run # Returns 0 if a cached result was used (do not continue with your tests), # else 1. # # If the check should not be cachable then specify argument 3 as "no-cache", # this is useful when a check not only checks but actually sets config # variables itself (which is not recommended, but desired sometimes). # # Arguments: # [ --verb "verb.." ] (replace "checking for") # config name # define name # action (fail,cont,disable or no-cache) # [ display name ] function mkl_check_begin { local verb="checking for" if [[ $1 == "--verb" ]]; then verb="$2" shift shift fi local name=$(mkl_meta_get $1 name "$4") [[ -z $name ]] && name="x:$1" echo -n "$verb $name..." if [[ $3 != "no-cache" ]]; then local status=$(mkl_var_get "MKL_STATUS_$1") # Check cache (from previous run or this one). # Only used cached value if the cached check succeeded: # it is more likely that a failed check has been fixed than the other # way around. if [[ ! -z $status && ( $status = "ok" ) ]]; then mkl_check_done "$1" "$2" "$3" $status "cached" return 0 fi fi return 1 } # Print that a check is done # Arguments: # config name # define name # action # status (ok|failed) # extra-info (optional) function mkl_check_done { # Clean up configname to be a safe varname local cname=${1//-/_} mkl_var_set "MKL_STATUS_$cname" "$4" cache local extra="" if [[ $4 = "failed" ]]; then local clr=$MKL_YELLOW extra=" ($3)" case "$3" in fail) clr=$MKL_RED ;; cont) extra="" ;; esac echo -e " $clr$4$MKL_CLR_RESET${extra}" else [[ ! -z $2 ]] && mkl_define_set "$cname" "$2" "1" [[ ! -z $2 ]] && mkl_mkvar_set "$cname" "$2" "y" [ ! -z "$5" ] && extra=" ($5)" echo -e " $MKL_GREEN$4${MKL_CLR_RESET}$extra" fi } # Perform configure check by compiling source snippet # Arguments: # [--ldflags="..." ] (appended after "compiler arguments" below) # config name # define name # action (fail|disable) # compiler (CC|CXX) # compiler arguments (optional "", example: "-lzookeeper") # source snippet function mkl_compile_check { local ldf= if [[ $1 == --ldflags=* ]]; then ldf=${1#*=} shift fi mkl_check_begin "$1" "$2" "$3" "$1 (by compile)" && return $? local cflags= if [[ $4 = "CXX" ]]; then local ext=cpp cflags="$(mkl_mkvar_get CXXFLAGS)" else local ext=c cflags="$(mkl_mkvar_get CFLAGS)" fi local srcfile=$(mktemp _mkltmpXXXXXX) mv "$srcfile" "${srcfile}.$ext" srcfile="$srcfile.$ext" echo "$6" > $srcfile echo " int main () { return 0; } " >> $srcfile local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $5 $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS)"; mkl_dbg "Compile check $1 ($2): $cmd" local output output=$($cmd 2>&1) if [[ $? != 0 ]] ; then mkl_dbg "compile check for $1 ($2) failed: $cmd: $output" mkl_check_failed "$1" "$2" "$3" "compile check failed: CC: $4 flags: $5 $cmd: $output source: $6" local ret=1 else mkl_check_done "$1" "$2" "$3" "ok" local ret=0 fi # OSX XCode toolchain creates dSYM directories when -g is set, # delete them specifically. rm -rf "$srcfile" "${srcfile}.o" "$srcfile*dSYM" return $ret } # Try to link with a library. # Arguments: # config name # define name # action (fail|disable) # linker flags (e.g. "-lpthreads") function mkl_link_check { mkl_check_begin "$1" "$2" "$3" "$1 (by linking)" && return $? 
local srcfile=$(mktemp _mktmpXXXXXX) echo "int main () { return 0; }" > $srcfile local cmd="${CC} $(mkl_mkvar_get LDFLAGS) -c $srcfile -o ${srcfile}.o $4"; mkl_dbg "Link check $1 ($2): $cmd" local output output=$($cmd 2>&1) if [[ $? != 0 ]] ; then mkl_dbg "link check for $1 ($2) failed: $output" mkl_check_failed "$1" "$2" "$3" "compile check failed: $output" local ret=1 else mkl_check_done "$1" "$2" "$3" "ok" "$4" local ret=0 fi rm -f $srcfile* return $ret } # Tries to figure out if we can use a static library or not. # Arguments: # library name (e.g. -lrdkafka) # compiler flags (optional "", e.g: "-lyajl") # Returns/outputs: # New list of compiler flags function mkl_lib_check_static { local libname=$1 local libs=$2 local arfile_var=STATIC_LIB_${libname#-l} # If STATIC_LIB_ specifies an existing .a file we # use that instead. if [[ -f ${!arfile_var} ]]; then libs=$(echo $libs | sed -e "s|$libname|${!arfile_var}|g") else libs=$(echo $libs | sed -e "s|$libname|${LDFLAGS_STATIC} $libname ${LDFLAGS_DYNAMIC}|g") fi echo $libs } # Checks that the specified lib is available through a number of methods. # compiler flags are automatically appended to "LIBS" mkvar on success. # # If STATIC_LIB_ is set to the path of an .a file # it will be used instead of -l. # # Arguments: # [--static=] (allows static linking (--enable-static) for the # library provided, e.g.: --static=-lrdkafka "librdkafka"..) # config name (library name (for pkg-config)) # define name # action (fail|disable|cont) # compiler (CC|CXX) # compiler flags (optional "", e.g: "-lyajl") # source snippet function mkl_lib_check { local staticopt= local is_static=0 if [[ $1 == --static* ]]; then staticopt=$1 shift fi if [[ $WITH_PKGCONFIG == "y" ]]; then if mkl_pkg_config_check $staticopt "$1" "$2" cont; then return 0 fi fi local libs="$5" if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs") is_static=1 fi if ! mkl_compile_check "$1" "$2" "$3" "$4" "$libs" "$6"; then return 1 fi if [[ $is_static == 1 ]]; then mkl_mkvar_prepend "$1" LIBS "$libs" else mkl_mkvar_append "$1" LIBS "$libs" fi return 0 } # Check for library with pkg-config # Automatically sets CFLAGS and LIBS from pkg-config information. # Arguments: # [--static=] (allows static linking (--enable-static) for the # library provided, e.g.: --static=-lrdkafka "librdkafka"..) # config name # define name # action (fail|disable|ignore) function mkl_pkg_config_check { local staticopt= if [[ $1 == --static* ]]; then staticopt=$1 shift fi mkl_check_begin "$1" "$2" "no-cache" "$1 (by pkg-config)" && return $? local cflags= local cmd="${PKG_CONFIG} --short-errors --cflags $1" mkl_dbg "pkg-config check $1 ($2): $cmd" cflags=$($cmd 2>&1) if [[ $? != 0 ]]; then mkl_dbg "'$cmd' failed: $cflags" mkl_check_failed "$1" "$2" "$3" "'$cmd' failed: $cflags" return 1 fi local libs= libs=$(${PKG_CONFIG} --short-errors --libs $1 2>&1) if [[ $? != 0 ]]; then mkl_dbg "${PKG_CONFIG} --libs $1 failed: $libs" mkl_check_failed "$1" "$2" "$3" "pkg-config --libs failed" return 1 fi mkl_mkvar_append $1 "CFLAGS" "$cflags" if [[ $WITH_STATIC_LINKING == y && ! -z $staticopt ]]; then libs=$(mkl_lib_check_static "${staticopt#*=}" "$libs") mkl_mkvar_prepend "$1" LIBS "$libs" else mkl_mkvar_append "$1" LIBS "$libs" fi mkl_check_done "$1" "$2" "$3" "ok" return 0 } # Check that a command runs and exits succesfully. 
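# Example (as used by configure.cc later in this tree):
#   mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"
# passes if `gcc --version` runs and exits with status 0.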
# Arguments: # config name # define name (optional, can be empty) # action # command function mkl_command_check { mkl_check_begin "$1" "$2" "$3" "$1 (by command)" && return $? local out= out=$($4 2>&1) if [[ $? != 0 ]]; then mkl_dbg "$1: $2: $4 failed: $out" mkl_check_failed "$1" "$2" "$3" "command '$4' failed: $out" return 1 fi mkl_check_done "$1" "$2" "$3" "ok" return 0 } # Check that a program is executable, but will not execute it. # Arguments: # config name # define name (optional, can be empty) # action # program name (e.g, objdump) function mkl_prog_check { mkl_check_begin --verb "checking executable" "$1" "$2" "$3" "$1" && return $? local out= out=$(command -v "$4" 2>&1) if [[ $? != 0 ]]; then mkl_dbg "$1: $2: $4 is not executable: $out" mkl_check_failed "$1" "$2" "$3" "$4 is not executable" return 1 fi mkl_check_done "$1" "$2" "$3" "ok" return 0 } # Checks that the check for the given config name passed. # This does not behave like the other checks, if the given config name passed # its test then nothing is printed. Else the configure will fail. # Arguments: # checked config name function mkl_config_check { local status=$(mkl_var_get "MKL_STATUS_$1") [[ $status = "ok" ]] && return 0 mkl_fail $1 "" "fail" "$MKL_MODULE requires $1" return 1 } # Checks that all provided config names are set. # Arguments: # config name # define name # action # check_config_name1 # check_config_name2.. function mkl_config_check_all { local cname= local res="ok" echo start this now for $1 for cname in ${@:4}; do local st=$(mkl_var_get "MKL_STATUS_$cname") [[ $status = "ok" ]] && continue mkl_fail $1 $2 $3 "depends on $cname" res="failed" done echo "try res $res" mkl_check_done "$1" "$2" "$3" $res } # Check environment variable # Arguments: # config name # define name # action # environment variable function mkl_env_check { mkl_check_begin "$1" "$2" "$3" "$1 (by env $4)" && return $? if [[ -z ${!4} ]]; then mkl_check_failed "$1" "$2" "$3" "environment variable $4 not set" return 1 fi mkl_check_done "$1" "$2" "$3" "ok" "${!4}" return 0 } # Run all checks function mkl_checks_run { # Set up common variables mkl_allvar_set "" MKL_APP_NAME $(mkl_meta_get description name) mkl_allvar_set "" MKL_APP_DESC_ONELINE "$(mkl_meta_get description oneline)" # Call checks functions in dependency order local mf for mf in $MKL_CHECKS ; do MKL_MODULE=${mf%:*} local func=${mf#*:} if mkl_func_exists $func ; then $func else mkl_err "Check function $func from $MKL_MODULE disappeared ($mf)" fi unset MKL_MODULE done } # Check for color support in terminal. # If the terminal supports colors, the function will alter # MKL_RED # MKL_GREEN # MKL_YELLOW # MKL_BLUE # MKL_CLR_RESET function mkl_check_terminal_color_support { local use_color=false local has_tput=false if [[ -z ${TERM} ]]; then # tput and dircolors require $TERM mkl_dbg "\$TERM is not set! Cannot check for color support in terminal." return 1 elif hash tput 2>/dev/null; then has_tput=true [[ $(tput colors 2>/dev/null) -ge 8 ]] && use_color=true mkl_dbg "tput reports color support: ${use_color}" elif hash dircolors 2>/dev/null; then # Enable color support only on colorful terminals. # dircolors --print-database uses its own built-in database # instead of using /etc/DIR_COLORS. Try to use the external file # first to take advantage of user additions. local safe_term=${TERM//[^[:alnum:]]/?} local match_lhs="" [[ -f ~/.dir_colors ]] && match_lhs="${match_lhs}$(<~/.dir_colors)" [[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(&1) if [[ $? 
-ne 0 ]]; then rm -f "$tmpfile" mkl_err "Failed to download $modname:" mkl_err0 $out return 1 fi # Move downloaded file into place replacing the old file. mv "$tmpfile" "$fname" || return 1 # "Return" filename echo "$fname" return 0 } # Load module by name or filename # Arguments: # "require"|"try" # filename # [ module arguments ] function mkl_module_load { local try=$1 shift local fname=$1 shift local modname=${fname#*configure.} local bypath=1 # Check if already loaded if mkl_in_list "$MKL_MODULES" "$modname"; then return 0 fi if [[ $fname = $modname ]]; then # Module specified by name, find the file. bypath=0 for fname in configure.$modname \ ${MKLOVE_DIR}/modules/configure.$modname ; do [[ -s $fname ]] && break done fi # Calling module local cmod=$MKL_MODULE [[ -z $cmod ]] && cmod="base" if [[ ! -s $fname ]]; then # Attempt to download module, if permitted if [[ $MKL_NO_DOWNLOAD != 0 || $bypath == 1 ]]; then mkl_err "Module $modname not found at $fname (required by $cmod) and downloads disabled" if [[ $try = "require" ]]; then mkl_fail "$modname" "none" "fail" \ "Module $modname not found (required by $cmod) and downloads disabled" fi return 1 fi fname=$(mkl_module_download "$modname") if [[ $? -ne 0 ]]; then mkl_err "Module $modname not found (required by $cmod)" if [[ $try = "require" ]]; then mkl_fail "$modname" "none" "fail" \ "Module $modname not found (required by $cmod)" return 1 fi fi # Now downloaded, try loading the module again. mkl_module_load $try "$fname" "$@" return $? fi # Set current module local save_MKL_MODULE=$MKL_MODULE MKL_MODULE=$modname mkl_dbg "Loading module $modname (required by $cmod) from $fname" # Source module file (positional arguments are available to module) source $fname # Restore current module (might be recursive) MKL_MODULE=$save_MKL_MODULE # Add module to list of modules mkl_var_append MKL_MODULES $modname # Rename module's special functions so we can call them separetely later. mkl_func_rename "options" "${modname}_options" mkl_func_push MKL_CHECKS "$modname" "checks" mkl_func_push MKL_GENERATORS "$modname" "generate" mkl_func_push MKL_CLEANERS "$modname" "clean" } # Require and load module # Must only be called from module file outside any function. # Arguments: # [ --try ] Dont fail if module doesn't exist # module1 # [ "must" "pass" ] # [ module arguments ... ] function mkl_require { local try="require" if [[ $1 = "--try" ]]; then local try="try" shift fi local mod=$1 shift local override_action= # Check for cyclic dependencies if mkl_in_list "$MKL_LOAD_STACK" "$mod"; then mkl_err "Cyclic dependency detected while loading $mod module:" local cmod= local lmod=$mod for cmod in $MKL_LOAD_STACK ; do mkl_err " $lmod required by $cmod" lmod=$cmod done mkl_fail base "" fail "Cyclic dependency detected while loading module $mod" return 1 fi mkl_var_prepend MKL_LOAD_STACK "$mod" if [[ "$1 $2" == "must pass" ]]; then shift shift override_action="fail" fi if [[ ! -z $override_action ]]; then mkl_meta_set "MOD__$mod" "override_action" "$override_action" fi mkl_module_load $try $mod "$@" local ret=$? mkl_var_shift MKL_LOAD_STACK return $ret } ########################################################################### # # # Usage options # # ########################################################################### MKL_USAGE="Usage: ./configure [OPTIONS...] 
mklove configure script - mklove, not autoconf Copyright (c) 2014-2015 Magnus Edenhill - https://github.com/edenhill/mklove " function mkl_usage { echo "$MKL_USAGE" local name=$(mkl_meta_get description name) if [[ ! -z ${name} ]]; then echo " $name - $(mkl_meta_get description oneline) $(mkl_meta_get description copyright) " fi local og for og in $MKL_USAGE_GROUPS ; do og="MKL_USAGE_GROUP__$og" echo "${!og}" done echo "Honoured environment variables: CC, CPP, CXX, CFLAGS, CPPFLAGS, CXXFLAGS, LDFLAGS, LIBS, LD, NM, OBJDUMP, STRIP, PKG_CONFIG, PKG_CONFIG_PATH, STATIC_LIB_=.../libname.a " } # Add usage option informative text # Arguments: # text function mkl_usage_info { MKL_USAGE="$MKL_USAGE $1" } # Add option to usage output # Arguments: # option group ("Standard", "Cross-Compilation", etc..) # variable name # option ("--foo=feh") # help # default (optional) # assignvalue (optional, default:"y") # function block (optional) function mkl_option { local optgroup=$1 local varname=$2 # Fixed width between option name and help in usage output local pad=" " if [[ ${#3} -lt ${#pad} ]]; then pad=${pad:0:$(expr ${#pad} - ${#3})} else pad="" fi # Add to usage output local optgroup_safe=$(mkl_env_esc $optgroup) if ! mkl_in_list "$MKL_USAGE_GROUPS" "$optgroup_safe" ; then mkl_env_append MKL_USAGE_GROUPS "$optgroup_safe" mkl_env_set "MKL_USAGE_GROUP__$optgroup_safe" "$optgroup options: " fi local defstr="" [[ ! -z $5 ]] && defstr=" [$5]" mkl_env_append "MKL_USAGE_GROUP__$optgroup_safe" " $3 $pad $4$defstr " local optname="${3#--}" local safeopt= local optval="" if [[ $3 == *=* ]]; then optname="${optname%=*}" optval="${3#*=}" fi safeopt=$(mkl_env_esc $optname) mkl_meta_set "MKL_OPT_ARGS" "$safeopt" "$optval" # # Optional variable scoping by prefix: "env:", "mk:", "def:" # local setallvar="mkl_allvar_set ''" local setmkvar="mkl_mkvar_set ''" if [[ $varname = env:* ]]; then # Set environment variable (during configure runtime only) varname=${varname#*:} setallvar=mkl_env_set setmkvar=mkl_env_set elif [[ $varname = mk:* ]]; then # Set Makefile.config variable varname=${varname#*:} setallvar="mkl_mkvar_append ''" setmkvar="mkl_mkvar_append ''" elif [[ $varname = def:* ]]; then # Set config.h define varname=${varname#*:} setallvar="mkl_define_set ''" setmkvar="mkl_define_set ''" fi if [[ ! -z $7 ]]; then # Function block specified. eval "function opt_$safeopt { $7 }" else # Add default implementation of function simply setting the value. # Application may override this by redefining the function after calling # mkl_option. if [[ $optval = "PATH" ]]; then # PATH argument: make it an absolute path. # Only set the make variable (not config.h) eval "function opt_$safeopt { $setmkvar $varname \"\$(mkl_abspath \$(mkl_render \$1))\"; }" else # Standard argument: simply set the value if [[ -z "$6" ]]; then eval "function opt_$safeopt { $setallvar $varname \"\$1\"; }" else eval "function opt_$safeopt { $setallvar $varname \"$6\"; }" fi fi fi # If default value is provided and does not start with "$" (variable ref) # then set it right away. # $ variable refs are set after all checks have run during the # generating step. if [[ ${#5} != 0 ]] ; then if [[ $5 = *\$* ]]; then mkl_var_append "MKL_LATE_VARS" "opt_$safeopt:$5" else opt_$safeopt $5 fi fi if [[ ! -z $varname ]]; then # Add variable to list MKL_CONFVARS="$MKL_CONFVARS $varname" fi } # Adds a toggle (--enable-X, --disable-X) option. # Arguments: # option group ("Standard", ..) # variable name (WITH_FOO) # option (--enable-foo) # help ("foo.." 
("Enable" and "Disable" will be prepended)) # default (y or n) function mkl_toggle_option { # Add option argument mkl_option "$1" "$2" "$3" "$4" "$5" # Add corresponding "--disable-foo" option for "--enable-foo". local disname="${3/--enable/--disable}" local dishelp="${4/Enable/Disable}" mkl_option "$1" "$2" "$disname" "$dishelp" "" "n" } # Adds a toggle (--enable-X, --disable-X) option with builtin checker. # This is the library version. # Arguments: # option group ("Standard", ..) # config name (foo, must be same as pkg-config name) # variable name (WITH_FOO) # action (fail or disable) # option (--enable-foo) # help (defaults to "Enable ") # linker flags (-lfoo) # default (y or n) function mkl_toggle_option_lib { local help="$6" [[ -z "$help" ]] && help="Enable $2" # Add option argument mkl_option "$1" "$3" "$5" "$help" "$8" # Add corresponding "--disable-foo" option for "--enable-foo". local disname="${5/--enable/--disable}" local dishelp="${help/Enable/Disable}" mkl_option "$1" "$3" "$disname" "$dishelp" "" "n" # Create checks eval "function _tmp_func { mkl_lib_check \"$2\" \"$3\" \"$4\" CC \"$7\"; }" mkl_func_push MKL_CHECKS "$MKL_MODULE" _tmp_func } kafkacat-1.3.1/mklove/modules/configure.builtin000066400000000000000000000056311307612056000216040ustar00rootroot00000000000000#!/bin/bash # # mklove builtin checks and options # Sets: # prefix, etc.. mkl_option "Standard" prefix "--prefix=PATH" \ "Install arch-independent files in PATH" "/usr/local" mkl_option "Standard" exec_prefix "--exec-prefix=PATH" \ "Install arch-dependent files in PATH" "\$prefix" mkl_option "Standard" bindir "--bindir=PATH" "User executables" "\$exec_prefix/bin" mkl_option "Standard" sbindir "--sbindir=PATH" "System admin executables" \ "\$exec_prefix/sbin" mkl_option "Standard" libexecdir "--libexecdir=PATH" "Program executables" \ "\$exec_prefix/libexec" mkl_option "Standard" datadir "--datadir=PATH" "Read-only arch-independent data" \ "\$prefix/share" mkl_option "Standard" sysconfdir "--sysconfdir=PATH" "Configuration data" \ "\$prefix/etc" mkl_option "Standard" sharedstatedir "--sharedstatedir=PATH" \ "Modifiable arch-independent data" "\$prefix/com" mkl_option "Standard" localstatedir "--localstatedir=PATH" \ "Modifiable local state data" "\$prefix/var" mkl_option "Standard" libdir "--libdir=PATH" "Libraries" "\$exec_prefix/lib" mkl_option "Standard" includedir "--includedir=PATH" "C/C++ header files" \ "\$prefix/include" mkl_option "Standard" infodir "--infodir=PATH" "Info documentation" "\$prefix/info" mkl_option "Standard" mandir "--mandir=PATH" "Manual pages" "\$prefix/man" mkl_option "Configure tool" "" "--list-modules" "List loaded mklove modules" mkl_option "Configure tool" "" "--list-checks" "List checks" mkl_option "Configure tool" env:MKL_FAILFATAL "--fail-fatal" "All failures are fatal" mkl_option "Configure tool" env:MKL_NOCACHE "--no-cache" "Dont use or generate config.cache" mkl_option "Configure tool" env:MKL_DEBUG "--debug" "Enable configure debugging" mkl_option "Configure tool" env:MKL_CLEAN "--clean" "Remove generated configure files" mkl_option "Configure tool" "" "--reconfigure" "Rerun configure with same arguments as last run" mkl_option "Configure tool" env:MKL_NO_DOWNLOAD "--no-download" "Disable downloads of required mklove modules" mkl_option "Configure tool" env:MKL_UPDATE_MODS "--update-modules" "Update modules from global repository" mkl_option "Configure tool" env:MKL_REPO_URL "--repo-url=URL_OR_PATH" "Override mklove modules repo URL" "$MKL_REPO_URL" mkl_option "Configure tool" 
"" "--help" "Show configure usage" mkl_toggle_option "Compatibility" "mk:MKL_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)" mkl_option "Configure tool" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix" mkl_option "Compatibility" "mk:DISABL_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)" mkl_option "Compatibility" "mk:DISABL_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)" function checks { if [[ ! -z $libdir ]]; then mkl_mkvar_append "libdir" LDFLAGS "-L${libdir}" fi if [[ ! -z $includedir ]]; then mkl_mkvar_append "includedir" CPPFLAGS "-I${includedir}" fi } kafkacat-1.3.1/mklove/modules/configure.cc000066400000000000000000000137161307612056000205260ustar00rootroot00000000000000#!/bin/bash # # Compiler detection # Sets: # CC, CXX, CFLAGS, CPPFLAGS, LDFLAGS, ARFLAGS, PKG_CONFIG, INSTALL, MBITS mkl_require host function checks { # C compiler mkl_meta_set "ccenv" "name" "C compiler from CC env" if ! mkl_command_check "ccenv" "WITH_CC" cont "$CC --version"; then if mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"; then CC=gcc elif mkl_command_check "clang" "WITH_CLANG" cont "clang --version"; then CC=clang elif mkl_command_check "cc" "WITH_CC" fail "cc --version"; then CC=cc fi fi export CC="${CC}" mkl_mkvar_set CC CC "$CC" if [[ $MKL_CC_WANT_CXX == 1 ]]; then # C++ compiler mkl_meta_set "cxxenv" "name" "C++ compiler from CXX env" if ! mkl_command_check "cxxenv" "WITH_CXX" cont "$CXX --version" ; then mkl_meta_set "gxx" "name" "C++ compiler (g++)" mkl_meta_set "clangxx" "name" "C++ compiler (clang++)" mkl_meta_set "cxx" "name" "C++ compiler (c++)" if mkl_command_check "gxx" "WITH_GXX" cont "g++ --version"; then CXX=g++ elif mkl_command_check "clangxx" "WITH_CLANGXX" cont "clang++ --version"; then CXX=clang++ elif mkl_command_check "cxx" "WITH_CXX" fail "c++ --version"; then CXX=c++ fi fi export CXX="${CXX}" mkl_mkvar_set "CXX" CXX $CXX fi # Handle machine bits, if specified. if [[ ! -z "$MBITS" ]]; then mkl_meta_set mbits_m name "mbits compiler flag (-m$MBITS)" if mkl_compile_check mbits_m "" fail CC "-m$MBITS"; then mkl_mkvar_append CPPFLAGS CPPFLAGS "-m$MBITS" mkl_mkvar_append LDFLAGS LDFLAGS "-m$MBITS" fi if [[ -z "$ARFLAGS" && $MBITS == 64 && $MKL_DISTRO == "SunOS" ]]; then # Turn on 64-bit archives on SunOS mkl_mkvar_append ARFLAGS ARFLAGS "S" fi fi # Provide prefix and checks for various other build tools. local t= for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip ; do local tenv=${t%:*} t=${t#*:} local tval="${!tenv}" [[ -z $tval ]] && tval="$t" if mkl_prog_check "$t" "" disable "$tval" ; then if [[ $tval != ${!tenv} ]]; then export "$tenv"="$tval" fi mkl_mkvar_set $tenv $tenv "$tval" fi done # Compiler and linker flags [[ ! -z $CFLAGS ]] && mkl_mkvar_set "CFLAGS" "CFLAGS" "$CFLAGS" [[ ! -z $CPPFLAGS ]] && mkl_mkvar_set "CPPFLAGS" "CPPFLAGS" "$CPPFLAGS" [[ ! -z $CXXFLAGS ]] && mkl_mkvar_set "CXXFLAGS" "CXXFLAGS" "$CXXFLAGS" [[ ! -z $LDFLAGS ]] && mkl_mkvar_set "LDFLAGS" "LDFLAGS" "$LDFLAGS" [[ ! -z $ARFLAGS ]] && mkl_mkvar_set "ARFLAGS" "ARFLAGS" "$ARFLAGS" if [[ $MKL_NO_DEBUG_SYMBOLS != "y" ]]; then # Add debug symbol flag (-g) # OSX 10.9 requires -gstrict-dwarf for some reason. 
kafkacat-1.3.1/mklove/modules/configure.cc000066400000000000000000000137161307612056000205260ustar00rootroot00000000000000#!/bin/bash # # Compiler detection # Sets: # CC, CXX, CFLAGS, CPPFLAGS, LDFLAGS, ARFLAGS, PKG_CONFIG, INSTALL, MBITS mkl_require host function checks { # C compiler mkl_meta_set "ccenv" "name" "C compiler from CC env" if ! mkl_command_check "ccenv" "WITH_CC" cont "$CC --version"; then if mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"; then CC=gcc elif mkl_command_check "clang" "WITH_CLANG" cont "clang --version"; then CC=clang elif mkl_command_check "cc" "WITH_CC" fail "cc --version"; then CC=cc fi fi export CC="${CC}" mkl_mkvar_set CC CC "$CC" if [[ $MKL_CC_WANT_CXX == 1 ]]; then # C++ compiler mkl_meta_set "cxxenv" "name" "C++ compiler from CXX env" if ! mkl_command_check "cxxenv" "WITH_CXX" cont "$CXX --version" ; then mkl_meta_set "gxx" "name" "C++ compiler (g++)" mkl_meta_set "clangxx" "name" "C++ compiler (clang++)" mkl_meta_set "cxx" "name" "C++ compiler (c++)" if mkl_command_check "gxx" "WITH_GXX" cont "g++ --version"; then CXX=g++ elif mkl_command_check "clangxx" "WITH_CLANGXX" cont "clang++ --version"; then CXX=clang++ elif mkl_command_check "cxx" "WITH_CXX" fail "c++ --version"; then CXX=c++ fi fi export CXX="${CXX}" mkl_mkvar_set "CXX" CXX $CXX fi # Handle machine bits, if specified. if [[ ! -z "$MBITS" ]]; then mkl_meta_set mbits_m name "mbits compiler flag (-m$MBITS)" if mkl_compile_check mbits_m "" fail CC "-m$MBITS"; then mkl_mkvar_append CPPFLAGS CPPFLAGS "-m$MBITS" mkl_mkvar_append LDFLAGS LDFLAGS "-m$MBITS" fi if [[ -z "$ARFLAGS" && $MBITS == 64 && $MKL_DISTRO == "SunOS" ]]; then # Turn on 64-bit archives on SunOS mkl_mkvar_append ARFLAGS ARFLAGS "S" fi fi # Provide prefix and checks for various other build tools. local t= for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip ; do local tenv=${t%:*} t=${t#*:} local tval="${!tenv}" [[ -z $tval ]] && tval="$t" if mkl_prog_check "$t" "" disable "$tval" ; then if [[ $tval != ${!tenv} ]]; then export "$tenv"="$tval" fi mkl_mkvar_set $tenv $tenv "$tval" fi done # Compiler and linker flags [[ ! -z $CFLAGS ]] && mkl_mkvar_set "CFLAGS" "CFLAGS" "$CFLAGS" [[ ! -z $CPPFLAGS ]] && mkl_mkvar_set "CPPFLAGS" "CPPFLAGS" "$CPPFLAGS" [[ ! -z $CXXFLAGS ]] && mkl_mkvar_set "CXXFLAGS" "CXXFLAGS" "$CXXFLAGS" [[ ! -z $LDFLAGS ]] && mkl_mkvar_set "LDFLAGS" "LDFLAGS" "$LDFLAGS" [[ ! -z $ARFLAGS ]] && mkl_mkvar_set "ARFLAGS" "ARFLAGS" "$ARFLAGS" if [[ $MKL_NO_DEBUG_SYMBOLS != "y" ]]; then # Add debug symbol flag (-g) # OSX 10.9 requires -gstrict-dwarf for some reason. mkl_meta_set cc_g_dwarf name "debug symbols compiler flag (-g...)" if [[ $MKL_DISTRO == "osx" ]]; then if mkl_compile_check cc_g_dwarf "" cont CC "-gstrict-dwarf"; then mkl_mkvar_append CPPFLAGS CPPFLAGS "-gstrict-dwarf" else mkl_mkvar_append CPPFLAGS CPPFLAGS "-g" fi else mkl_mkvar_append CPPFLAGS CPPFLAGS "-g" fi fi # pkg-config if [ -z "$PKG_CONFIG" ]; then PKG_CONFIG=pkg-config fi if mkl_command_check "pkgconfig" "WITH_PKGCONFIG" cont "$PKG_CONFIG --version"; then export PKG_CONFIG fi mkl_mkvar_set "pkgconfig" PKG_CONFIG $PKG_CONFIG [[ ! -z "$PKG_CONFIG_PATH" ]] && mkl_env_append PKG_CONFIG_PATH "$PKG_CONFIG_PATH" # install if [ -z "$INSTALL" ]; then if [[ $MKL_DISTRO == "SunOS" ]]; then mkl_meta_set ginstall name "GNU install" if mkl_command_check ginstall "" ignore "ginstall --version"; then INSTALL=ginstall else INSTALL=install fi else INSTALL=install fi fi if mkl_command_check "install" "WITH_INSTALL" cont "$INSTALL --version"; then export INSTALL fi mkl_mkvar_set "install" INSTALL $INSTALL # Enable profiling if desired if [[ $WITH_PROFILING == y ]]; then mkl_allvar_set "" "WITH_PROFILING" "y" mkl_mkvar_append CPPFLAGS CPPFLAGS "-pg" mkl_mkvar_append LDFLAGS LDFLAGS "-pg" fi # Optimization if [[ $WITHOUT_OPTIMIZATION == n ]]; then mkl_mkvar_append CPPFLAGS CPPFLAGS "-O2" else mkl_mkvar_append CPPFLAGS CPPFLAGS "-O0" fi # Static linking if [[ $WITH_STATIC_LINKING == y ]]; then # LDFLAGS_STATIC is the LDFLAGS needed to enable static linking # of subsequent libraries, while # LDFLAGS_DYNAMIC is the LDFLAGS needed to enable dynamic linking. mkl_mkvar_set staticlinking LDFLAGS_STATIC "-Wl,-Bstatic" mkl_mkvar_set staticlinking LDFLAGS_DYNAMIC "-Wl,-Bdynamic" fi } mkl_option "Compiler" "env:CC" "--cc=CC" "Build using C compiler CC" "\$CC" mkl_option "Compiler" "env:CXX" "--cxx=CXX" "Build using C++ compiler CXX" "\$CXX" mkl_option "Compiler" "ARCH" "--arch=ARCH" "Build for architecture" "$(uname -m)" mkl_option "Compiler" "CPU" "--cpu=CPU" "Build and optimize for specific CPU" "generic" mkl_option "Compiler" "MBITS" "--mbits=BITS" "Machine bits (32 or 64)" "" for n in CFLAGS CPPFLAGS CXXFLAGS LDFLAGS ARFLAGS; do mkl_option "Compiler" "mk:$n" "--$n=$n" "Add $n flags" done mkl_option "Compiler" "env:PKG_CONFIG_PATH" "--pkg-config-path" "Extra paths for pkg-config" mkl_option "Compiler" "WITH_PROFILING" "--enable-profiling" "Enable profiling" mkl_option "Compiler" "WITH_STATIC_LINKING" "--enable-static" "Enable static linking" mkl_option "Compiler" "WITHOUT_OPTIMIZATION" "--disable-optimization" "Disable optimization flag to compiler" "n" mkl_option "Compiler" "env:MKL_NO_DEBUG_SYMBOLS" "--disable-debug-symbols" "Disable debugging symbols" "n" mkl_option "Compiler" "env:MKL_WANT_WERROR" "--enable-werror" "Enable compiler warnings as errors" "n" 
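The probe-and-append pattern used above for -m$MBITS and -gstrict-dwarf generalizes to any candidate compiler flag. A hedged sketch of the same pattern (the cc_wextra name and the -Wextra flag are illustrative, not part of this module):

# Append -Wextra to CPPFLAGS only if the detected compiler accepts it;
# the "cont" action makes a failed probe non-fatal.
mkl_meta_set cc_wextra name "compiler flag (-Wextra)"
if mkl_compile_check cc_wextra "" cont CC "-Wextra"; then
    mkl_mkvar_append CPPFLAGS CPPFLAGS "-Wextra"
fi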
kafkacat-1.3.1/mklove/modules/configure.gitversion000066400000000000000000000012021307612056000223170ustar00rootroot00000000000000#!/bin/bash # # Sets version variable from git information. # Optional arguments: # "as" # VARIABLE_NAME # # Example: Set version in variable named "MYVERSION": # mkl_require gitversion as MYVERSION [default DEFVERSION] if [[ $1 == "as" ]]; then shift __MKL_GITVERSION_VARNAME="$1" shift else __MKL_GITVERSION_VARNAME="VERSION" fi if [[ $1 == "default" ]]; then shift __MKL_GITVERSION_DEFAULT="$1" shift fi function checks { mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" \ "$(git describe --abbrev=6 --tags HEAD --always 2>/dev/null || echo $__MKL_GITVERSION_DEFAULT)" } kafkacat-1.3.1/mklove/modules/configure.good_cflags000066400000000000000000000005131307612056000223770ustar00rootroot00000000000000#!/bin/bash # # Provides some known-good CFLAGS # Sets: # CFLAGS # CXXFLAGS # CPPFLAGS function checks { mkl_mkvar_append CPPFLAGS CPPFLAGS \ "-Wall -Wsign-compare -Wfloat-equal -Wpointer-arith" if [[ $MKL_WANT_WERROR = "y" ]]; then mkl_mkvar_append CPPFLAGS CPPFLAGS \ "-Werror" fi } kafkacat-1.3.1/mklove/modules/configure.host000066400000000000000000000053661307612056000211140ustar00rootroot00000000000000#!/bin/bash # # Host OS support # Sets: # HOST # BUILD # TARGET # FIXME: No need for this right now #mkl_require host_linux #mkl_require host_osx #mkl_require host_cygwin #mkl_option "Cross-compilation" "mk:HOST_OS" "--host-os=osname" "Host OS (linux,osx,cygwin,..)" "auto" # autoconf compatibility - does nothing at this point mkl_option "Cross-compilation" "mk:HOST" "--host=HOST" "Configure to build programs to run on HOST (no-op)" mkl_option "Cross-compilation" "mk:BUILD" "--build=BUILD" "Configure for building on BUILD (no-op)" mkl_option "Cross-compilation" "mk:TARGET" "--target=TARGET" "Configure for building cross-toolkits for platform TARGET (no-op)" function checks { # Try to figure out what OS/distro we are running on. mkl_check_begin "distro" "" "no-cache" "OS or distribution" # Try lsb_release local sys sys=$(lsb_release -is 2>/dev/null) if [[ $? -gt 0 ]]; then # That didn't work; try uname. local kn=$(uname -s) case $kn in Linux) sys=Linux ;; Darwin) sys=osx ;; CYGWIN*) sys=Cygwin ;; *) sys="$kn" ;; esac fi if [[ -z $sys ]]; then mkl_check_failed "distro" "" "ignore" "" else mkl_check_done "distro" "" "ignore" "ok" "$sys" mkl_mkvar_set "distro" "MKL_DISTRO" "$sys" fi } #function checks { # mkl_check_begin "host" "HOST_OS" "no-cache" "host OS" # # # # # If --host-os=.. was not specified then this is most likely not a # # a cross-compilation and we can base the host-os on the native OS. # # # if [[ $HOST_OS != "auto" ]]; then # mkl_check_done "host" "HOST_OS" "cont" "ok" "$HOST_OS" # return 0 # fi # # kn=$(uname -s) # case $kn in # Linux) # hostos=linux # ;; # Darwin) # hostos=osx # ;; # CYGWIN*) # hostos=cygwin # ;; # *) # hostos="$(mkl_lower $kn)" # mkl_err "Unknown host OS kernel name: $kn" # mkl_err0 " Will attempt to load module host_$hostos anyway." # mkl_err0 " Please consider writing a configure.host_$hostos" # ;; # esac # # if ! mkl_require --try "host_$hostos"; then # # Module not found # mkl_check_done "host" "HOST_OS" "cont" "failed" "$kn?" # else # # Module loaded # # if mkl_func_exists "host_${hostos}_setup" ; then # "host_${hostos}_setup" # fi # # mkl_check_done "host" "HOST_OS" "cont" "ok" "$hostos" # fi # # # Set HOST_OS var even if probing failed. # mkl_mkvar_set "host" "HOST_OS" "$hostos" #}
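Modules downstream of this check branch on the MKL_DISTRO variable it sets; the osx and SunOS cases in configure.cc above are real examples. A minimal hypothetical sketch of the same pattern in another module (the library path is illustrative):

# Hypothetical checks function branching on the detected distro;
# MKL_DISTRO was set by configure.host's distro check above.
function checks {
    if [[ $MKL_DISTRO == "osx" ]]; then
        mkl_mkvar_append LDFLAGS LDFLAGS "-L/usr/local/lib"
    fi
}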
kafkacat-1.3.1/rdport.h000066400000000000000000000017551307612056000147520ustar00rootroot00000000000000#pragma once /** * Porting */ #ifdef _MSC_VER /* Windows win32 native */ #define WIN32_MEAN_AND_LEAN #include <windows.h> #define RD_NORETURN #define RD_UNUSED /* MSVC loves prefixing POSIX functions with underscore */ #define _COMPAT(FUNC) _ ## FUNC #define STDIN_FILENO 0 typedef SSIZE_T ssize_t; ssize_t getdelim (char **bufptr, size_t *n, int delim, FILE *fp); /** * @brief gettimeofday() for win32 */ static RD_UNUSED int rd_gettimeofday (struct timeval *tv, struct timezone *tz) { SYSTEMTIME st; FILETIME ft; ULARGE_INTEGER d; GetSystemTime(&st); SystemTimeToFileTime(&st, &ft); d.HighPart = ft.dwHighDateTime; d.LowPart = ft.dwLowDateTime; tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L); tv->tv_usec = (long)(st.wMilliseconds * 1000); return 0; } #else /* POSIX */ #define RD_NORETURN __attribute__((noreturn)) #define RD_UNUSED __attribute__((unused)) #define _COMPAT(FUNC) FUNC #define rd_gettimeofday(tv,tz) gettimeofday(tv,tz) #endif kafkacat-1.3.1/rpm/000077500000000000000000000000001307612056000140575ustar00rootroot00000000000000kafkacat-1.3.1/rpm/kafkacat.spec000066400000000000000000000030601307612056000164770ustar00rootroot00000000000000Name: kafkacat Version: 1.2.0 Release: 1%{?dist} Summary: kafkacat is a generic non-JVM producer and consumer for Apache Kafka 0.8, think of it as a netcat for Kafka. Group: Productivity/Networking/Other License: BSD-2-Clause URL: https://github.com/edenhill/kafkacat Source: kafkacat-%{version}.tar.gz Requires: librdkafka1 BuildRequires: zlib-devel gcc >= 4.1 librdkafka-devel BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) %description kafkacat is a generic non-JVM producer and consumer for Apache Kafka 0.8, think of it as a netcat for Kafka. In producer mode kafkacat reads messages from stdin, delimited with a configurable delimiter (-D, defaults to newline), and produces them to the provided Kafka cluster (-b), topic (-t) and partition (-p). In consumer mode kafkacat reads messages from a topic and partition and prints them to stdout using the configured message delimiter. kafkacat also features a Metadata list (-L) mode to display the current state of the Kafka cluster and its topics and partitions. kafkacat is fast and lightweight; statically linked it is no more than 150Kb. %prep %setup -q %configure %build make %install rm -rf %{buildroot} DESTDIR=%{buildroot} make install %clean rm -rf %{buildroot} %files -n %{name} %defattr(755,root,root) %{_bindir}/kafkacat %defattr(644,root,root) %doc README.md %doc LICENSE %changelog * Wed Jun 03 2015 Magnus Edenhill 1.2.0-1 - Release 1.2.0 * Fri Dec 19 2014 François Saint-Jacques 1.1.0-1 - Initial RPM package kafkacat-1.3.1/tools.c000066400000000000000000000061161307612056000145710ustar00rootroot00000000000000/* * kafkacat - Apache Kafka consumer and producer * * Copyright (c) 2016, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "kafkacat.h" void partition_list_print (rd_kafka_topic_partition_list_t *parts, void *json_gen) { int i; /* Sort by topic+partition */ rd_kafka_topic_partition_list_sort(parts, NULL, NULL); #if ENABLE_JSON if (conf.flags & CONF_F_FMT_JSON) { partition_list_print_json(parts, json_gen); return; } #endif for (i = 0 ; i < parts->cnt ; i++) { const rd_kafka_topic_partition_t *p = &parts->elems[i]; printf("%s [%"PRId32"] offset %"PRId64"%s", p->topic, p->partition, p->offset, !p->err ? "\n": ""); if (p->err) printf(": %s\n", rd_kafka_err2str(p->err)); } } int query_offsets_by_time (rd_kafka_topic_partition_list_t *offsets) { rd_kafka_resp_err_t err; #if RD_KAFKA_VERSION >= 0x00090300 char errstr[512]; if (rd_kafka_conf_set(conf.rk_conf, "api.version.request", "true", errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) FATAL("Failed to enable api.version.request: %s", errstr); if (!(conf.rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf.rk_conf, errstr, sizeof(errstr)))) FATAL("Failed to create producer: %s", errstr); err = rd_kafka_offsets_for_times(conf.rk, offsets, 10*1000); #else err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; #endif if (err) FATAL("offsets_for_times failed: %s", rd_kafka_err2str(err)); partition_list_print(offsets, NULL); rd_kafka_destroy(conf.rk); return 0; }
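query_offsets_by_time() expects each input entry to carry a timestamp in the offset field, which is how rd_kafka_offsets_for_times() takes its input; on return the field holds the resolved offset. A hedged sketch of building such a list (the topic name and timestamp are illustrative, and conf.rk_conf must already have been initialized as elsewhere in kafkacat):

rd_kafka_topic_partition_list_t *offsets;

/* One entry: partition 0 of "mytopic", with the offset field used
 * as a millisecond timestamp per the offsets_for_times() contract. */
offsets = rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offsets, "mytopic", 0)->offset =
        1469571067000; /* ms since the epoch */

query_offsets_by_time(offsets);  /* prints "mytopic [0] offset N" */

rd_kafka_topic_partition_list_destroy(offsets);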
kafkacat-1.3.1/win32/000077500000000000000000000000001307612056000142235ustar00rootroot00000000000000kafkacat-1.3.1/win32/.gitignore000066400000000000000000000000771307612056000162170ustar00rootroot00000000000000*.opendb *.db *.filters *.user packages x64 x86 ?ebug ?elease kafkacat-1.3.1/win32/getdelim.c000066400000000000000000000054371307612056000161700ustar00rootroot00000000000000/* Copyright 2002, Red Hat Inc. - all rights reserved */ /* FUNCTION <<getdelim>>---read a line up to a specified line delimiter INDEX getdelim ANSI_SYNOPSIS #include <stdio.h> int getdelim(char **<[bufptr]>, size_t *<[n]>, int <[delim]>, FILE *<[fp]>); TRAD_SYNOPSIS #include <stdio.h> int getdelim(<[bufptr]>, <[n]>, <[delim]>, <[fp]>) char **<[bufptr]>; size_t *<[n]>; int <[delim]>; FILE *<[fp]>; DESCRIPTION <<getdelim>> reads a file <[fp]> up to and possibly including a specified delimiter <[delim]>. The line is read into a buffer pointed to by <[bufptr]> and designated with size *<[n]>. If the buffer is not large enough, it will be dynamically grown by <<getdelim>>. As the buffer is grown, the pointer to the size <[n]> will be updated. RETURNS <<getdelim>> returns <<-1>> if no characters were successfully read; otherwise, it returns the number of bytes successfully read. At end of file, the result is nonzero. PORTABILITY <<getdelim>> is a glibc extension. No supporting OS subroutines are directly required. */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include "../rdport.h" #define MIN_LINE_SIZE 4 #define DEFAULT_LINE_SIZE 128 ssize_t getdelim (char **bufptr, size_t *n, int delim, FILE *fp) { char *buf; char *ptr; char *newbuf; size_t newsize, numbytes; int pos; int ch; int cont; if (fp == NULL || bufptr == NULL || n == NULL) { errno = EINVAL; return -1; } buf = *bufptr; if (buf == NULL || *n < MIN_LINE_SIZE) { buf = (char *)realloc (*bufptr, DEFAULT_LINE_SIZE); if (buf == NULL) { return -1; } *bufptr = buf; *n = DEFAULT_LINE_SIZE; } numbytes = *n; ptr = buf; cont = 1; while (cont) { /* fill buffer - leaving room for nul-terminator */ while (--numbytes > 0) { if ((ch = getc (fp)) == EOF) { cont = 0; break; } else { *ptr++ = ch; if (ch == delim) { cont = 0; break; } } } if (cont) { /* Buffer is too small so reallocate a larger buffer. Use a temporary so the caller's buffer (*bufptr) is not lost if realloc fails. */ pos = ptr - buf; newsize = (*n << 1); newbuf = realloc (buf, newsize); if (newbuf == NULL) { cont = 0; break; } buf = newbuf; /* After reallocating, continue in new buffer */ *bufptr = buf; *n = newsize; ptr = buf + pos; numbytes = newsize - pos; } } /* if no input data, return failure */ if (ptr == buf) return -1; /* otherwise, nul-terminate and return number of bytes read */ *ptr = '\0'; return (ssize_t)(ptr - buf); } kafkacat-1.3.1/win32/kafkacat.sln000066400000000000000000000023611307612056000165100ustar00rootroot00000000000000 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 14 VisualStudioVersion = 14.0.25420.1 MinimumVisualStudioVersion = 10.0.40219.1 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "kafkacat", "kafkacat.vcxproj", "{73BD034C-37C2-463C-A0A6-E118E483ED0B}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Debug|x64.ActiveCfg = Debug|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Debug|x64.Build.0 = Debug|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Debug|x86.ActiveCfg = Debug|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Debug|x86.Build.0 = Debug|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Release|x64.ActiveCfg = Release|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Release|x64.Build.0 = Release|x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Release|x86.ActiveCfg = Release|Win32 {73BD034C-37C2-463C-A0A6-E118E483ED0B}.Release|x86.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal kafkacat-1.3.1/win32/kafkacat.vcxproj000066400000000000000000000171761307612056000174110ustar00rootroot00000000000000 Debug Win32 Release Win32 Debug x64 Release x64 {73BD034C-37C2-463C-A0A6-E118E483ED0B} Win32Proj 8.1 Application true v140 Static Application false v140 Static Application true v140 Static Application false v140 Static true true WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase Disabled MachineX86 true Console WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase MachineX86 true Console true true MultiThreaded MultiThreaded This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
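A usage sketch for the getdelim() implementation above, mirroring how kafkacat's producer mode consumes delimiter-separated records from stdin (the -D option, newline by default). This standalone program is illustrative, not part of the tree:

#define _GNU_SOURCE   /* getdelim() on glibc; the win32 port declares it in rdport.h */
#include <stdio.h>
#include <stdlib.h>

int main (void) {
        char *buf = NULL;
        size_t size = 0;
        ssize_t len;

        /* Each iteration yields one record, delimiter included. */
        while ((len = getdelim(&buf, &size, '\n', stdin)) != -1)
                printf("record of %zd bytes\n", len);

        free(buf);   /* the buffer getdelim() grows is caller-freed */
        return 0;
}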
kafkacat-1.3.1/win32/packages.config000066400000000000000000000004451307612056000171730ustar00rootroot00000000000000 kafkacat-1.3.1/win32/win32_config.h000066400000000000000000000001111307612056000166540ustar00rootroot00000000000000#pragma once #define KAFKACAT_VERSION "1.3.0-dev" /* Manually updated */kafkacat-1.3.1/win32/wingetopt.c000066400000000000000000000364011307612056000164130ustar00rootroot00000000000000/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */ /* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ /* * Copyright (c) 2002 Todd C. Miller * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F39502-99-1-0512. */ /*- * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Dieter Baron and Thomas Klausner. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <errno.h> #include <stdlib.h> #include <string.h> #include "wingetopt.h" #include <stdarg.h> #include <stdio.h> #include <windows.h> #define REPLACE_GETOPT /* use this getopt as the system getopt(3) */ #ifdef REPLACE_GETOPT int opterr = 1; /* if error message should be printed */ int optind = 1; /* index into parent argv vector */ int optopt = '?'; /* character checked for validity */ #undef optreset /* see getopt.h */ #define optreset __mingw_optreset int optreset; /* reset getopt */ char *optarg; /* argument associated with option */ #endif #define PRINT_ERROR ((opterr) && (*options != ':')) #define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ #define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ #define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ /* return values */ #define BADCH (int)'?' #define BADARG ((*options == ':') ? (int)':' : (int)'?') #define INORDER (int)1 #ifndef __CYGWIN__ #define __progname __argv[0] #else extern char __declspec(dllimport) *__progname; #endif #ifdef __CYGWIN__ static char EMSG[] = ""; #else #define EMSG "" #endif static int getopt_internal(int, char * const *, const char *, const struct option *, int *, int); static int parse_long_options(char * const *, const char *, const struct option *, int *, int); static int gcd(int, int); static void permute_args(int, int, int, char * const *); static char *place = EMSG; /* option letter processing */ /* XXX: set optreset to 1 rather than these two */ static int nonopt_start = -1; /* first non option argument (for permute) */ static int nonopt_end = -1; /* first option after non options (for permute) */ /* Error messages */ static const char recargchar[] = "option requires an argument -- %c"; static const char recargstring[] = "option requires an argument -- %s"; static const char ambig[] = "ambiguous option -- %.*s"; static const char noarg[] = "option doesn't take an argument -- %.*s"; static const char illoptchar[] = "unknown option -- %c"; static const char illoptstring[] = "unknown option -- %s"; static void _vwarnx(const char *fmt,va_list ap) { (void)fprintf(stderr,"%s: ",__progname); if (fmt != NULL) (void)vfprintf(stderr,fmt,ap); (void)fprintf(stderr,"\n"); } static void warnx(const char *fmt,...) { va_list ap; va_start(ap,fmt); _vwarnx(fmt,ap); va_end(ap); } /* * Compute the greatest common divisor of a and b. */ static int gcd(int a, int b) { int c; c = a % b; while (c != 0) { a = b; b = c; c = a % b; } return (b); } /* * Exchange the block from nonopt_start to nonopt_end with the block * from nonopt_end to opt_end (keeping the same order of arguments * in each block). */ static void permute_args(int panonopt_start, int panonopt_end, int opt_end, char * const *nargv) { int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; char *swap; /* * compute lengths of blocks and number and size of cycles */ nnonopts = panonopt_end - panonopt_start; nopts = opt_end - panonopt_end; ncycle = gcd(nnonopts, nopts); cyclelen = (opt_end - panonopt_start) / ncycle; for (i = 0; i < ncycle; i++) { cstart = panonopt_end+i; pos = cstart; for (j = 0; j < cyclelen; j++) { if (pos >= panonopt_end) pos -= nnonopts; else pos += nopts; swap = nargv[pos]; /* LINTED const cast */ ((char **) nargv)[pos] = nargv[cstart]; /* LINTED const cast */ ((char **)nargv)[cstart] = swap; } } } /* * parse_long_options -- * Parse long options in argc/argv argument vector. * Returns -1 if short_too is set and the option does not match long_options. 
*/ static int parse_long_options(char * const *nargv, const char *options, const struct option *long_options, int *idx, int short_too) { char *current_argv, *has_equal; size_t current_argv_len; int i, ambiguous, match; #define IDENTICAL_INTERPRETATION(_x, _y) \ (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \ long_options[(_x)].flag == long_options[(_y)].flag && \ long_options[(_x)].val == long_options[(_y)].val) current_argv = place; match = -1; ambiguous = 0; optind++; if ((has_equal = strchr(current_argv, '=')) != NULL) { /* argument found (--option=arg) */ current_argv_len = has_equal - current_argv; has_equal++; } else current_argv_len = strlen(current_argv); for (i = 0; long_options[i].name; i++) { /* find matching long option */ if (strncmp(current_argv, long_options[i].name, current_argv_len)) continue; if (strlen(long_options[i].name) == current_argv_len) { /* exact match */ match = i; ambiguous = 0; break; } /* * If this is a known short option, don't allow * a partial match of a single character. */ if (short_too && current_argv_len == 1) continue; if (match == -1) /* partial match */ match = i; else if (!IDENTICAL_INTERPRETATION(i, match)) ambiguous = 1; } if (ambiguous) { /* ambiguous abbreviation */ if (PRINT_ERROR) warnx(ambig, (int)current_argv_len, current_argv); optopt = 0; return (BADCH); } if (match != -1) { /* option found */ if (long_options[match].has_arg == no_argument && has_equal) { if (PRINT_ERROR) warnx(noarg, (int)current_argv_len, current_argv); /* * XXX: GNU sets optopt to val regardless of flag */ if (long_options[match].flag == NULL) optopt = long_options[match].val; else optopt = 0; return (BADARG); } if (long_options[match].has_arg == required_argument || long_options[match].has_arg == optional_argument) { if (has_equal) optarg = has_equal; else if (long_options[match].has_arg == required_argument) { /* * optional argument doesn't use next nargv */ optarg = nargv[optind++]; } } if ((long_options[match].has_arg == required_argument) && (optarg == NULL)) { /* * Missing argument; leading ':' indicates no error * should be generated. */ if (PRINT_ERROR) warnx(recargstring, current_argv); /* * XXX: GNU sets optopt to val regardless of flag */ if (long_options[match].flag == NULL) optopt = long_options[match].val; else optopt = 0; --optind; return (BADARG); } } else { /* unknown option */ if (short_too) { --optind; return (-1); } if (PRINT_ERROR) warnx(illoptstring, current_argv); optopt = 0; return (BADCH); } if (idx) *idx = match; if (long_options[match].flag) { *long_options[match].flag = long_options[match].val; return (0); } else return (long_options[match].val); #undef IDENTICAL_INTERPRETATION } /* * getopt_internal -- * Parse argc/argv argument vector. Called by user level routines. */ static int getopt_internal(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx, int flags) { char *oli; /* option letter list index */ int optchar, short_too; static int posixly_correct = -1; if (options == NULL) return (-1); /* * XXX Some GNU programs (like cvs) set optind to 0 instead of * XXX using optreset. Work around this braindamage. */ if (optind == 0) optind = optreset = 1; /* * Disable GNU extensions if POSIXLY_CORRECT is set or options * string begins with a '+'. * * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or * optreset != 0 for GNU compatibility. 
*/ #ifndef _MSC_VER if (posixly_correct == -1 || optreset != 0) posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); #endif if (*options == '-') flags |= FLAG_ALLARGS; else if (posixly_correct || *options == '+') flags &= ~FLAG_PERMUTE; if (*options == '+' || *options == '-') options++; optarg = NULL; if (optreset) nonopt_start = nonopt_end = -1; start: if (optreset || !*place) { /* update scanning pointer */ optreset = 0; if (optind >= nargc) { /* end of argument vector */ place = EMSG; if (nonopt_end != -1) { /* do permutation, if we have to */ permute_args(nonopt_start, nonopt_end, optind, nargv); optind -= nonopt_end - nonopt_start; } else if (nonopt_start != -1) { /* * If we skipped non-options, set optind * to the first of them. */ optind = nonopt_start; } nonopt_start = nonopt_end = -1; return (-1); } if (*(place = nargv[optind]) != '-' || (place[1] == '\0' && strchr(options, '-') == NULL)) { place = EMSG; /* found non-option */ if (flags & FLAG_ALLARGS) { /* * GNU extension: * return non-option as argument to option 1 */ optarg = nargv[optind++]; return (INORDER); } if (!(flags & FLAG_PERMUTE)) { /* * If no permutation wanted, stop parsing * at first non-option. */ return (-1); } /* do permutation */ if (nonopt_start == -1) nonopt_start = optind; else if (nonopt_end != -1) { permute_args(nonopt_start, nonopt_end, optind, nargv); nonopt_start = optind - (nonopt_end - nonopt_start); nonopt_end = -1; } optind++; /* process next argument */ goto start; } if (nonopt_start != -1 && nonopt_end == -1) nonopt_end = optind; /* * If we have "-" do nothing, if "--" we are done. */ if (place[1] != '\0' && *++place == '-' && place[1] == '\0') { optind++; place = EMSG; /* * We found an option (--), so if we skipped * non-options, we have to permute. */ if (nonopt_end != -1) { permute_args(nonopt_start, nonopt_end, optind, nargv); optind -= nonopt_end - nonopt_start; } nonopt_start = nonopt_end = -1; return (-1); } } /* * Check long options if: * 1) we were passed some * 2) the arg is not just "-" * 3) either the arg starts with -- we are getopt_long_only() */ if (long_options != NULL && place != nargv[optind] && (*place == '-' || (flags & FLAG_LONGONLY))) { short_too = 0; if (*place == '-') place++; /* --foo long option */ else if (*place != ':' && strchr(options, *place) != NULL) short_too = 1; /* could be short option too */ optchar = parse_long_options(nargv, options, long_options, idx, short_too); if (optchar != -1) { place = EMSG; return (optchar); } } if ((optchar = (int)*place++) == (int)':' || (optchar == (int)'-' && *place != '\0') || (oli = strchr(options, optchar)) == NULL) { /* * If the user specified "-" and '-' isn't listed in * options, return -1 (non-option) as per POSIX. * Otherwise, it is an unknown option character (or ':'). 
*/ if (optchar == (int)'-' && *place == '\0') return (-1); if (!*place) ++optind; if (PRINT_ERROR) warnx(illoptchar, optchar); optopt = optchar; return (BADCH); } if (long_options != NULL && optchar == 'W' && oli[1] == ';') { /* -W long-option */ if (*place) /* no space */ /* NOTHING */; else if (++optind >= nargc) { /* no arg */ place = EMSG; if (PRINT_ERROR) warnx(recargchar, optchar); optopt = optchar; return (BADARG); } else /* white space */ place = nargv[optind]; optchar = parse_long_options(nargv, options, long_options, idx, 0); place = EMSG; return (optchar); } if (*++oli != ':') { /* doesn't take argument */ if (!*place) ++optind; } else { /* takes (optional) argument */ optarg = NULL; if (*place) /* no white space */ optarg = place; else if (oli[1] != ':') { /* arg not optional */ if (++optind >= nargc) { /* no arg */ place = EMSG; if (PRINT_ERROR) warnx(recargchar, optchar); optopt = optchar; return (BADARG); } else optarg = nargv[optind]; } place = EMSG; ++optind; } /* dump back option letter */ return (optchar); } #ifdef REPLACE_GETOPT /* * getopt -- * Parse argc/argv argument vector. * * [eventually this will replace the BSD getopt] */ int getopt(int nargc, char * const *nargv, const char *options) { /* * We don't pass FLAG_PERMUTE to getopt_internal() since * the BSD getopt(3) (unlike GNU) has never done this. * * Furthermore, since many privileged programs call getopt() * before dropping privileges it makes sense to keep things * as simple (and bug-free) as possible. */ return (getopt_internal(nargc, nargv, options, NULL, NULL, 0)); } #endif /* REPLACE_GETOPT */ /* * getopt_long -- * Parse argc/argv argument vector. */ int getopt_long(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx) { return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE)); } /* * getopt_long_only -- * Parse argc/argv argument vector. */ int getopt_long_only(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx) { return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE|FLAG_LONGONLY)); } kafkacat-1.3.1/win32/wingetopt.h000066400000000000000000000060061307612056000164160ustar00rootroot00000000000000#ifndef __GETOPT_H__ /** * DISCLAIMER * This file has no copyright assigned and is placed in the Public Domain. * This file is a part of the w64 mingw-runtime package. * * The w64 mingw-runtime package and its code is distributed in the hope that it * will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR * IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */ #define __GETOPT_H__ /* All the headers include this file. */ #include <crtdefs.h> #ifdef __cplusplus extern "C" { #endif extern int optind; /* index of first non-option in argv */ extern int optopt; /* single option character, as parsed */ extern int opterr; /* flag to enable built-in diagnostics... */ /* (user may set to zero, to suppress) */ extern char *optarg; /* pointer to argument of current option */ extern int getopt(int nargc, char * const *nargv, const char *options); #ifdef _BSD_SOURCE /* * BSD adds the non-standard `optreset' feature, for reinitialisation * of `getopt' parsing. We support this feature, for applications which * proclaim their BSD heritage, before including this header; however, * to maintain portability, developers are advised to avoid it. 
*/ # define optreset __mingw_optreset extern int optreset; #endif #ifdef __cplusplus } #endif /* * POSIX requires the `getopt' API to be specified in `unistd.h'; * thus, `unistd.h' includes this header. However, we do not want * to expose the `getopt_long' or `getopt_long_only' APIs, when * included in this manner. Thus, close the standard __GETOPT_H__ * declarations block, and open an additional __GETOPT_LONG_H__ * specific block, only when *not* __UNISTD_H_SOURCED__, in which * to declare the extended API. */ #endif /* !defined(__GETOPT_H__) */ #if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) #define __GETOPT_LONG_H__ #ifdef __cplusplus extern "C" { #endif struct option /* specification for a long form option... */ { const char *name; /* option name, without leading hyphens */ int has_arg; /* does it take an argument? */ int *flag; /* where to save its status, or NULL */ int val; /* its associated status value */ }; enum /* permitted values for its `has_arg' field... */ { no_argument = 0, /* option never takes an argument */ required_argument, /* option always requires an argument */ optional_argument /* option may take an argument */ }; extern int getopt_long(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx); extern int getopt_long_only(int nargc, char * const *nargv, const char *options, const struct option *long_options, int *idx); /* * Previous MinGW implementation had... */ #ifndef HAVE_DECL_GETOPT /* * ...for the long form API only; keep this for compatibility. */ # define HAVE_DECL_GETOPT 1 #endif #ifdef __cplusplus } #endif #endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */
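Finally, a hedged sketch of the getopt_long() API declared above. kafkacat itself uses only short options, so the long option names here are hypothetical; the loop is the canonical parsing pattern:

#include <stdio.h>
#include "wingetopt.h"   /* <getopt.h> on POSIX systems */

int main (int argc, char **argv) {
        static const struct option longopts[] = {
                { "brokers", required_argument, NULL, 'b' },
                { "topic",   required_argument, NULL, 't' },
                { NULL, 0, NULL, 0 }    /* terminator */
        };
        int opt;

        while ((opt = getopt_long(argc, argv, "b:t:", longopts, NULL)) != -1) {
                switch (opt) {
                case 'b':
                        printf("brokers = %s\n", optarg);
                        break;
                case 't':
                        printf("topic = %s\n", optarg);
                        break;
                default:
                        return 1;  /* getopt_long() already printed a diagnostic */
                }
        }
        return 0;
}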