mydumper-0.5.2/CMakeLists.txt0000644000000000000000000000413112052470045014244 0ustar 00000000000000cmake_minimum_required(VERSION 2.6) project(mydumper) set(VERSION 0.5.2) set(ARCHIVE_NAME "${CMAKE_PROJECT_NAME}-${VERSION}") #Required packages set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules) find_package(MySQL) find_package(ZLIB) find_package(GLIB2) find_package(PCRE) option(BUILD_DOCS "Build the documentation" ON) if (BUILD_DOCS) add_subdirectory(docs) endif (BUILD_DOCS) set(CMAKE_C_FLAGS "-Wall -Wno-deprecated-declarations -Wunused -Wwrite-strings -Wno-strict-aliasing -Wextra -Wshadow -Werror -O3 -g ${MYSQL_CFLAGS}") include_directories(${MYDUMPER_SOURCE_DIR} ${MYSQL_INCLUDE_DIR} ${GLIB2_INCLUDE_DIR} ${PCRE_INCLUDE_DIR} ${ZLIB_INCLUDE_DIRS}) if (NOT CMAKE_INSTALL_PREFIX) SET(CMAKE_INSTALL_PREFIX "/usr/local" CACHE STRING "Install path" FORCE) endif (NOT CMAKE_INSTALL_PREFIX) MARK_AS_ADVANCED(CMAKE) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/config.h) add_executable(mydumper mydumper.c binlog.c server_detect.c g_unix_signal.c) target_link_libraries(mydumper ${MYSQL_LIBRARIES_mysqlclient_r} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES}) add_executable(myloader myloader.c) target_link_libraries(myloader ${MYSQL_LIBRARIES_mysqlclient_r} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES}) INSTALL(TARGETS mydumper myloader RUNTIME DESTINATION bin ) add_custom_target(dist COMMAND bzr export --root=${ARCHIVE_NAME} ${CMAKE_BINARY_DIR}/${ARCHIVE_NAME}.tar.gz WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) OPTION(RUN_CPPCHECK "Run cppcheck" OFF) IF(RUN_CPPCHECK) include(CppcheckTargets) add_cppcheck(mydumper) add_cppcheck(myloader) ENDIF(RUN_CPPCHECK) MESSAGE(STATUS "------------------------------------------------") MESSAGE(STATUS "MYSQL_CONFIG = ${MYSQL_CONFIG}") MESSAGE(STATUS "CMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}") MESSAGE(STATUS "BUILD_DOCS = ${BUILD_DOCS}") 
MESSAGE(STATUS "RUN_CPPCHECK = ${RUN_CPPCHECK}") MESSAGE(STATUS "Change a values with: cmake -D=") MESSAGE(STATUS "------------------------------------------------") MESSAGE(STATUS) mydumper-0.5.2/README0000644000000000000000000000433612052470045012373 0ustar 00000000000000== What is mydumper? Why? == * Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall) * Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data) * Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc * Manageability - supports PCRE for specifying database and tables inclusions and exclusions It does not support schema dumping and leaves that to 'mysqldump --no-data' == How to build it? == Run: cmake . make One needs to install development versions of required libaries (MySQL, GLib, ZLib, PCRE): * Ubuntu or Debian: apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev * Fedora, RedHat and CentOS: yum install glib2-devel mysql-devel zlib-devel pcre-devel * openSUSE: zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel * MacOSX: port install glib2 mysql5 pcre pkgconfig cmake (You may want to run 'port select mysql mysql5' afterwards) One has to make sure, that pkg-config, mysql_config, pcre-config are all in $PATH == How does consistent snapshot work? == This is all done following best MySQL practices and traditions: * As a precaution, slow running queries on the server either abort the dump, or get killed * Global write lock is acquired ("FLUSH TABLES WITH READ LOCK") * Various metadata is read ("SHOW SLAVE STATUS","SHOW MASTER STATUS") * Other threads connect and establish snapshots ("START TRANSACTION WITH CONSISTENT SNAPSHOT") ** On pre-4.1.8 it creates dummy InnoDB table, and reads from it. 
* Once all worker threads announce the snapshot establishment, master executes "UNLOCK TABLES" and starts queueing jobs. This for now does not provide consistent snapshots for non-transactional engines - support for that is expected in 0.2 :) == How to exclude (or include) databases? == Once can use --regex functionality, for example not to dump mysql and test databases: mydumper --regex '^(?!(mysql|test))' Of course, regex functionality can be used to describe pretty much any list of tables. == How to exclude MERGE or Federated tables == Use same --regex exclusion syntax. Again, engine-specific behaviors are targetted for 0.2 mydumper-0.5.2/binlog.c0000644000000000000000000001775312052470045013140 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Domas Mituzas, Facebook ( domas at fb dot com ) Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #include #include #include #include #include #include #include #include #include #include "mydumper.h" #include "binlog.h" #define BINLOG_MAGIC "\xfe\x62\x69\x6e" #define EVENT_HEADER_LENGTH 19 #define EVENT_ROTATE_FIXED_LENGTH 8 enum event_postions { EVENT_TIMESTAMP_POSITION= 0, EVENT_TYPE_POSITION= 4, EVENT_SERVERID_POSITION= 5, EVENT_LENGTH_POSITION= 9, EVENT_NEXT_POSITION= 13, EVENT_FLAGS_POSITION= 17, EVENT_EXTRA_FLAGS_POSITION= 19 // currently unused in v4 binlogs, but a good marker for end of header }; enum event_type { ROTATE_EVENT= 4, FORMAT_DESCRIPTION_EVENT= 15, EVENT_TOO_SHORT= 254 // arbitrary high number, in 5.1 the max event type number is 27 so this should be fine for a while }; extern int compress_output; extern gboolean daemon_mode; extern gboolean shutdown_triggered; FILE *new_binlog_file(char *binlog_file, const char *binlog_dir); void close_binlog_file(FILE *outfile); char *rotate_file_name(const char *buf); void get_binlogs(MYSQL *conn, struct configuration *conf) { // TODO: find logs we already have, use start position based on position of last log. 
MYSQL_RES *result; MYSQL_ROW row; char* last_filename = NULL; guint64 last_position; // Only snapshot dump the binlogs once in daemon mode static gboolean got_binlogs= FALSE; if (got_binlogs) return; else got_binlogs= TRUE; if (mysql_query(conn, "SHOW MASTER STATUS")) { g_critical("Error: Could not execute query: %s", mysql_error(conn)); return; } result = mysql_store_result(conn); if ((row = mysql_fetch_row(result))) { last_filename= g_strdup(row[0]); last_position= strtoll(row[1], NULL, 10); } else { g_critical("Error: Could not obtain binary log stop position"); if (last_filename != NULL) g_free(last_filename); return; } mysql_free_result(result); if (mysql_query(conn, "SHOW BINARY LOGS")) { g_critical("Error: Could not execute query: %s", mysql_error(conn)); if (last_filename != NULL) g_free(last_filename); return; } result = mysql_store_result(conn); while ((row = mysql_fetch_row(result))) { struct job *j = g_new0(struct job,1); struct binlog_job *bj = g_new0(struct binlog_job,1); j->job_data=(void*) bj; bj->filename=g_strdup(row[0]); bj->start_position=4; bj->stop_position= (!strcasecmp(row[0], last_filename)) ? 
last_position : 0; j->conf=conf; j->type=JOB_BINLOG; g_async_queue_push(conf->queue,j); } mysql_free_result(result); if (last_filename != NULL) g_free(last_filename); } void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous) { // set serverID = max serverID - threadID to try an eliminate conflicts, // 0 is bad because mysqld will disconnect at the end of the last log // dupes aren't too bad since it is up to the client to check for them uchar buf[128]; // We need to read the raw network packets NET* net; net= &conn->net; unsigned long len; FILE* outfile; guint32 event_type; gboolean read_error= FALSE; gboolean read_end= FALSE; gboolean rotated= FALSE; guint32 server_id= G_MAXUINT32 - mysql_thread_id(conn); guint64 pos_counter= 0; int4store(buf, (guint32)start_position); // Binlog flags (2 byte int) int2store(buf + 4, 0); // ServerID int4store(buf + 6, server_id); memcpy(buf + 10, binlog_file, strlen(binlog_file)); #if MYSQL_VERSION_ID < 50100 if (simple_command(conn, COM_BINLOG_DUMP, (const char *)buf, #else if (simple_command(conn, COM_BINLOG_DUMP, buf, #endif strlen(binlog_file) + 10, 1)) { g_critical("Error: binlog: Critical error whilst requesting binary log"); } while(1) { outfile= new_binlog_file(binlog_file, binlog_directory); if (outfile == NULL) { g_critical("Error: binlog: Could not create binlog file '%s', %d", binlog_file, errno); return; } write_binlog(outfile, BINLOG_MAGIC, 4); while(1) { len = 0; if (net->vio != 0) len=my_net_read(net); if ((len == 0) || (len == ~(unsigned long) 0)) { // Net timeout (set to 1 second) if (mysql_errno(conn) == ER_NET_READ_INTERRUPTED) { if (shutdown_triggered) { close_binlog_file(outfile); return; } else { continue; } // A real error } else { g_critical("Error: binlog: Network packet read error getting binlog file: %s", binlog_file); close_binlog_file(outfile); return; } } if (len < 8 && net->read_pos[0]) { // end of data 
break; } pos_counter += len; event_type= get_event((const char*)net->read_pos + 1, len -1); switch (event_type) { case EVENT_TOO_SHORT: g_critical("Error: binlog: Event too short in binlog file: %s", binlog_file); read_error= TRUE; break; case ROTATE_EVENT: if (rotated) { read_end= TRUE; } else { len= 1; rotated= TRUE; } break; default: // if we get this far this is a normal event to record break; } if (read_error) break; write_binlog(outfile, (const char*)net->read_pos + 1, len - 1); if (read_end) { if (!continuous) { break; } else { g_free(binlog_file); binlog_file= rotate_file_name((const char*)net->read_pos + 1); break; } } // stop if we are at requested end of last log if ((stop_position > 0) && (pos_counter >= stop_position)) break; } close_binlog_file(outfile); if ((!continuous) || (!read_end)) break; if (continuous && read_end) { read_end= FALSE; rotated= FALSE; } } } char *rotate_file_name(const char *buf) { guint32 event_length= 0; // event length is 4 bytes at position 9 event_length= uint4korr(&buf[EVENT_LENGTH_POSITION]); // event length includes the header, plus a rotate event has a fixed 8byte part we don't need event_length= event_length - EVENT_HEADER_LENGTH - EVENT_ROTATE_FIXED_LENGTH; return g_strndup(&buf[EVENT_HEADER_LENGTH + EVENT_ROTATE_FIXED_LENGTH], event_length); } FILE *new_binlog_file(char *binlog_file, const char *binlog_dir) { FILE *outfile; char* filename; if (!compress_output) { filename= g_strdup_printf("%s/%s", binlog_dir, binlog_file); outfile= g_fopen(filename, "w"); } else { filename= g_strdup_printf("%s/%s.gz", binlog_dir, binlog_file); outfile= (void*) gzopen(filename, "w"); } g_free(filename); return outfile; } void close_binlog_file(FILE *outfile) { if (!compress_output) fclose(outfile); else gzclose((gzFile) outfile); } unsigned int get_event(const char *buf, unsigned int len) { if (len < EVENT_TYPE_POSITION) return EVENT_TOO_SHORT; return buf[EVENT_TYPE_POSITION]; // TODO: Would be good if we can check for valid event 
type, unfortunately this check can change from version to version } void write_binlog(FILE* file, const char* data, guint64 len) { int err; if (len > 0) { int write_result; if (!compress_output) write_result= write(fileno(file), data, len); else write_result= gzwrite((gzFile)file, data, len); if (write_result <= 0) { if (!compress_output) g_critical("Error: binlog: Error writing binary log: %s", strerror(errno)); else g_critical("Error: binlog: Error writing compressed binary log: %s", gzerror((gzFile)file, &err)); } } } mydumper-0.5.2/binlog.h0000644000000000000000000000233412052470045013132 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Domas Mituzas, Facebook ( domas at fb dot com ) Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #ifndef _binlog_h #define _binlog_h #include "mydumper.h" void get_binlogs(MYSQL *conn, struct configuration *conf); void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous); unsigned int get_event(const char *buf, unsigned int len); void write_binlog(FILE* file, const char* data, guint64 len); #endif mydumper-0.5.2/cmake/0000755000000000000000000000000012052470045012565 5ustar 00000000000000mydumper-0.5.2/common.h0000644000000000000000000000404112052470045013145 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #ifndef _common_h #define _common_h char *hostname=NULL; char *username=NULL; char *password=NULL; char *socket_path=NULL; char *db=NULL; guint port=3306; guint num_threads= 4; guint verbose=2; gboolean compress_protocol= FALSE; gboolean program_version= FALSE; GOptionEntry common_entries[] = { { "host", 'h', 0, G_OPTION_ARG_STRING, &hostname, "The host to connect to", NULL }, { "user", 'u', 0, G_OPTION_ARG_STRING, &username, "Username with privileges to run the dump", NULL }, { "password", 'p', 0, G_OPTION_ARG_STRING, &password, "User password", NULL }, { "port", 'P', 0, G_OPTION_ARG_INT, &port, "TCP/IP port to connect to", NULL }, { "socket", 'S', 0, G_OPTION_ARG_STRING, &socket_path, "UNIX domain socket file to use for connection", NULL }, { "threads", 't', 0, G_OPTION_ARG_INT, &num_threads, "Number of threads to use, default 4", NULL }, { "compress-protocol", 'C', 0, G_OPTION_ARG_NONE, &compress_protocol, "Use compression on the MySQL connection", NULL }, { "version", 'V', 0, G_OPTION_ARG_NONE, &program_version, "Show the program version and exit", NULL }, { "verbose", 'v', 0, G_OPTION_ARG_INT, &verbose, "Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2", NULL }, { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL } }; #endif mydumper-0.5.2/config.h.in0000644000000000000000000000011412052470045013524 0ustar 00000000000000#ifndef CONFIG_H #define CONFIG_H #cmakedefine VERSION "@VERSION@" #endif mydumper-0.5.2/docs/0000755000000000000000000000000012052470045012435 5ustar 00000000000000mydumper-0.5.2/g_unix_signal.c0000644000000000000000000000703612052470045014505 0ustar 00000000000000#define _POSIX_SOURCE #include #include static GPtrArray *signal_data = NULL; typedef struct _GUnixSignalData { guint source_id; GMainContext *context; gboolean triggered; gint signum; } GUnixSignalData; typedef struct _GUnixSignalSource { GSource source; GUnixSignalData *data; } 
GUnixSignalSource; static inline GUnixSignalData* get_signal_data(guint index) { return (GUnixSignalData*)g_ptr_array_index(signal_data, index); } static void handler(gint signum) { g_assert(signal_data != NULL); guint i; for (i = 0; i < signal_data->len; ++i) if (get_signal_data(i)->signum == signum) get_signal_data(i)->triggered = TRUE; struct sigaction action; action.sa_handler= handler; sigemptyset (&action.sa_mask); action.sa_flags = 0; sigaction(signum, &action, NULL); } static gboolean check(GSource *source) { GUnixSignalSource *signal_source = (GUnixSignalSource*) source; return signal_source->data->triggered; } static gboolean prepare(GSource *source, gint *timeout_) { GUnixSignalSource *signal_source = (GUnixSignalSource*) source; if (signal_source->data->context == NULL) { g_main_context_ref(signal_source->data->context = g_source_get_context(source)); signal_source->data->source_id = g_source_get_id(source); } *timeout_ = -1; return signal_source->data->triggered; } static gboolean dispatch(GSource *source, GSourceFunc callback, gpointer user_data) { GUnixSignalSource *signal_source = (GUnixSignalSource*) source; signal_source->data->triggered = FALSE; return callback(user_data) ? 
TRUE : FALSE; } static void finalize(GSource *source) { GUnixSignalSource *signal_source = (GUnixSignalSource*) source; struct sigaction action; action.sa_handler= NULL; sigemptyset (&action.sa_mask); action.sa_flags = 0; sigaction(signal_source->data->signum, &action, NULL); g_main_context_unref(signal_source->data->context); g_ptr_array_remove_fast(signal_data, signal_source->data); if (signal_data->len == 0) signal_data = (GPtrArray*) g_ptr_array_free(signal_data, TRUE); g_free(signal_source->data); } static GSourceFuncs SourceFuncs = { .prepare = prepare, .check = check, .dispatch = dispatch, .finalize = finalize, .closure_callback = NULL, .closure_marshal = NULL }; static void g_unix_signal_source_init(GSource *source, gint signum) { GUnixSignalSource *signal_source = (GUnixSignalSource *) source; signal_source->data = g_new(GUnixSignalData, 1); signal_source->data->triggered = FALSE; signal_source->data->signum = signum; signal_source->data->context = NULL; if (signal_data == NULL) signal_data = g_ptr_array_new(); g_ptr_array_add(signal_data, signal_source->data); } GSource *g_unix_signal_source_new(gint signum) { GSource *source = g_source_new(&SourceFuncs, sizeof(GUnixSignalSource)); g_unix_signal_source_init(source, signum); struct sigaction action; action.sa_handler= handler; sigemptyset (&action.sa_mask); action.sa_flags = 0; sigaction(signum, &action, NULL); return source; } guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify) { g_return_val_if_fail(function != NULL, 0); GSource *source = g_unix_signal_source_new(signum); if (priority != G_PRIORITY_DEFAULT) g_source_set_priority (source, priority); g_source_set_callback(source, function, data, notify); guint id = g_source_attach(source, NULL); g_source_unref(source); return id; } guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data) { return g_unix_signal_add_full(G_PRIORITY_DEFAULT, signum, function, data, NULL); } 
mydumper-0.5.2/g_unix_signal.h0000644000000000000000000000052312052470045014504 0ustar 00000000000000#ifndef G_UNIX_SIGNAL_H #define G_UNIX_SIGNAL_H #include GSource *g_unix_signal_source_new(gint signum); guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data); guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify); #endif /* G_UNIX_SIGNAL_H */ mydumper-0.5.2/mydumper.c0000644000000000000000000012755312052470045013530 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Domas Mituzas, Facebook ( domas at fb dot com ) Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #define _LARGEFILE64_SOURCE #define _FILE_OFFSET_BITS 64 #include #include #include #include #include #include #include #include #include #include #include #include #include #include "binlog.h" #include "server_detect.h" #include "common.h" #include "g_unix_signal.h" #include "config.h" char *regexstring=NULL; const char DIRECTORY[]= "export"; const char BINLOG_DIRECTORY[]= "binlog_snapshot"; const char DAEMON_BINLOGS[]= "binlogs"; static GMutex * init_mutex = NULL; /* Program options */ gchar *output_directory= NULL; guint statement_size= 1000000; guint rows_per_file= 0; int longquery= 60; int build_empty_files= 0; int need_dummy_read= 0; int compress_output= 0; int killqueries= 0; int detected_server= 0; guint snapshot_interval= 60; gboolean daemon_mode= FALSE; gchar *ignore_engines= NULL; char **ignore= NULL; gchar *tables_list= NULL; char **tables= NULL; gboolean need_binlogs= FALSE; gchar *binlog_directory= NULL; gchar *daemon_binlog_directory= NULL; gchar *logfile= NULL; FILE *logoutfile= NULL; gboolean no_schemas= FALSE; gboolean no_locks= FALSE; GList *innodb_tables= NULL; GList *non_innodb_table= NULL; GList *table_schemas= NULL; gint non_innodb_table_counter= 0; gint non_innodb_done= 0; // For daemon mode, 0 or 1 guint dump_number= 0; guint binlog_connect_id= 0; gboolean shutdown_triggered= FALSE; GAsyncQueue *start_scheduled_dump; GMainLoop *m1; int errors; static GOptionEntry entries[] = { { "database", 'B', 0, G_OPTION_ARG_STRING, &db, "Database to dump", NULL }, { "tables-list", 'T', 0, G_OPTION_ARG_STRING, &tables_list, "Comma delimited table list to dump (does not exclude regex option)", NULL }, { "outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &output_directory, "Directory to output files to", NULL }, { "statement-size", 's', 0, G_OPTION_ARG_INT, &statement_size, "Attempted 
size of INSERT statement in bytes, default 1000000", NULL}, { "rows", 'r', 0, G_OPTION_ARG_INT, &rows_per_file, "Try to split tables into chunks of this many rows", NULL}, { "compress", 'c', 0, G_OPTION_ARG_NONE, &compress_output, "Compress output files", NULL}, { "build-empty-files", 'e', 0, G_OPTION_ARG_NONE, &build_empty_files, "Build dump files even if no data available from table", NULL}, { "regex", 'x', 0, G_OPTION_ARG_STRING, ®exstring, "Regular expression for 'db.table' matching", NULL}, { "ignore-engines", 'i', 0, G_OPTION_ARG_STRING, &ignore_engines, "Comma delimited list of storage engines to ignore", NULL }, { "no-schemas", 'm', 0, G_OPTION_ARG_NONE, &no_schemas, "Do not dump table schemas with the data", NULL }, { "no-locks", 'k', 0, G_OPTION_ARG_NONE, &no_locks, "Do not execute the temporary shared read lock. WARNING: This will cause inconsistent backups", NULL }, { "long-query-guard", 'l', 0, G_OPTION_ARG_INT, &longquery, "Set long query timer in seconds, default 60", NULL }, { "kill-long-queries", 'k', 0, G_OPTION_ARG_NONE, &killqueries, "Kill long running queries (instead of aborting)", NULL }, { "binlogs", 'b', 0, G_OPTION_ARG_NONE, &need_binlogs, "Get a snapshot of the binary logs as well as dump data", NULL }, { "daemon", 'D', 0, G_OPTION_ARG_NONE, &daemon_mode, "Enable daemon mode", NULL }, { "snapshot-interval", 'I', 0, G_OPTION_ARG_INT, &snapshot_interval, "Interval between each dump snapshot (in minutes), requires --daemon, default 60", NULL }, { "logfile", 'L', 0, G_OPTION_ARG_FILENAME, &logfile, "Log file name to use, by default stdout is used", NULL }, { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL } }; struct tm tval; void dump_schema_data(MYSQL *conn, char *database, char *table, char *filename); void dump_schema(char *database, char *table, struct configuration *conf); void dump_table(MYSQL *conn, char *database, char *table, struct configuration *conf, gboolean is_innodb); guint64 dump_table_data(MYSQL *, FILE *, char *, char *, 
char *); void dump_database(MYSQL *, char *); GList * get_chunks_for_table(MYSQL *, char *, char*, struct configuration *conf); guint64 estimate_count(MYSQL *conn, char *database, char *table, char *field, char *from, char *to); void dump_table_data_file(MYSQL *conn, char *database, char *table, char *where, char *filename); void create_backup_dir(char *directory); gboolean write_data(FILE *,GString*); gboolean check_regex(char *database, char *table); void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); void set_verbose(guint verbosity); MYSQL *reconnect_for_binlog(MYSQL *thrconn); void start_dump(MYSQL *conn); MYSQL *create_main_connection(); void *binlog_thread(void *data); void *exec_thread(void *data); void write_log_file(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { (void) log_domain; (void) log_level; (void) message; (void) user_data; } void set_verbose(guint verbosity) { if (logfile) { logoutfile = g_fopen(logfile, "w"); if (!logoutfile) { g_critical("Could not open log file '%s' for writing: %d", logfile, errno); exit(EXIT_FAILURE); } } switch (verbosity) { case 0: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), no_log, NULL); break; case 1: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_MESSAGE), no_log, NULL); if (logfile) g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_ERROR | G_LOG_LEVEL_CRITICAL), write_log_file, NULL); break; case 2: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MESSAGE), no_log, NULL); if (logfile) g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_ERROR | G_LOG_LEVEL_WARNING | G_LOG_LEVEL_ERROR | G_LOG_LEVEL_CRITICAL), write_log_file, NULL); break; default: if (logfile) g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), write_log_file, NULL); 
break; } } gboolean sig_triggered(gpointer user_data) { (void) user_data; g_message("Shutting down gracefully"); shutdown_triggered= TRUE; g_main_loop_quit(m1); return FALSE; } void clear_dump_directory() { GError *error= NULL; char* dump_directory= g_strdup_printf("%s/%d", output_directory, dump_number); GDir* dir= g_dir_open(dump_directory, 0, &error); if (error) { g_critical("cannot open directory %s, %s\n", dump_directory, error->message); errors++; return; } const gchar* filename= NULL; while((filename= g_dir_read_name(dir))) { gchar* path= g_build_filename(dump_directory, filename, NULL); if (g_unlink(path) == -1) { g_critical("error removing file %s (%d)\n", path, errno); errors++; return; } g_free(path); } g_dir_close(dir); g_free(dump_directory); } gboolean run_snapshot(gpointer *data) { (void) data; g_async_queue_push(start_scheduled_dump,GINT_TO_POINTER(1)); return (shutdown_triggered) ? FALSE : TRUE; } /* Check database.table string against regular expression */ gboolean check_regex(char *database, char *table) { /* This is not going to be used in threads */ static pcre *re = NULL; int rc; int ovector[9]= {0}; const char *error; int erroroffset; char *p; /* Let's compile the RE before we do anything */ if (!re) { re = pcre_compile(regexstring,PCRE_CASELESS|PCRE_MULTILINE,&error,&erroroffset,NULL); if(!re) { g_critical("Regular expression fail: %s", error); exit(EXIT_FAILURE); } } p=g_strdup_printf("%s.%s",database,table); rc = pcre_exec(re,NULL,p,strlen(p),0,0,ovector,9); g_free(p); return (rc>0)?TRUE:FALSE; } /* Write some stuff we know about snapshot, before it changes */ void write_snapshot_info(MYSQL *conn, FILE *file) { MYSQL_RES *master=NULL, *slave=NULL; MYSQL_FIELD *fields; MYSQL_ROW row; char *masterlog=NULL; char *masterpos=NULL; char *slavehost=NULL; char *slavelog=NULL; char *slavepos=NULL; mysql_query(conn,"SHOW MASTER STATUS"); master=mysql_store_result(conn); if (master && (row=mysql_fetch_row(master))) { masterlog=row[0]; 
masterpos=row[1]; } mysql_query(conn, "SHOW SLAVE STATUS"); slave=mysql_store_result(conn); guint i; if (slave && (row=mysql_fetch_row(slave))) { fields=mysql_fetch_fields(slave); for (i=0; iconf; // mysql_init is not thread safe, especially in Connector/C g_mutex_lock(init_mutex); MYSQL *thrconn = mysql_init(NULL); g_mutex_unlock(init_mutex); mysql_options(thrconn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); if (compress_protocol) mysql_options(thrconn,MYSQL_OPT_COMPRESS,NULL); if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { g_critical("Failed to connect to database: %s", mysql_error(thrconn)); exit(EXIT_FAILURE); } else { g_message("Thread %d connected using MySQL connection ID %lu", td->thread_id, mysql_thread_id(thrconn)); } if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")){ g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn)); } if (mysql_query(thrconn, "SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ")) { g_warning("Failed to set isolation level: %s", mysql_error(thrconn)); } if (mysql_query(thrconn, "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */")) { g_critical("Failed to start consistent snapshot: %s",mysql_error(thrconn)); errors++; } /* Unfortunately version before 4.1.8 did not support consistent snapshot transaction starts, so we cheat */ if (need_dummy_read) { mysql_query(thrconn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.mydumperdummy"); MYSQL_RES *res=mysql_store_result(thrconn); if (res) mysql_free_result(res); } mysql_query(thrconn, "/*!40101 SET NAMES binary*/"); g_async_queue_push(conf->ready,GINT_TO_POINTER(1)); struct job* job= NULL; struct table_job* tj= NULL; struct schema_job* sj= NULL; struct binlog_job* bj= NULL; for(;;) { GTimeVal tv; g_get_current_time(&tv); g_time_val_add(&tv,1000*1000*1); job=(struct job *)g_async_queue_pop(conf->queue); if (shutdown_triggered && (job->type != JOB_SHUTDOWN)) { continue; } 
switch (job->type) { case JOB_DUMP: tj=(struct table_job *)job->job_data; if (tj->where) g_message("Thread %d dumping data for `%s`.`%s` where %s", td->thread_id, tj->database, tj->table, tj->where); else g_message("Thread %d dumping data for `%s`.`%s`", td->thread_id, tj->database, tj->table); dump_table_data_file(thrconn, tj->database, tj->table, tj->where, tj->filename); if(tj->database) g_free(tj->database); if(tj->table) g_free(tj->table); if(tj->where) g_free(tj->where); if(tj->filename) g_free(tj->filename); g_free(tj); g_free(job); break; case JOB_DUMP_NON_INNODB: tj=(struct table_job *)job->job_data; if (tj->where) g_message("Thread %d dumping data for `%s`.`%s` where %s", td->thread_id, tj->database, tj->table, tj->where); else g_message("Thread %d dumping data for `%s`.`%s`", td->thread_id, tj->database, tj->table); dump_table_data_file(thrconn, tj->database, tj->table, tj->where, tj->filename); if(tj->database) g_free(tj->database); if(tj->table) g_free(tj->table); if(tj->where) g_free(tj->where); if(tj->filename) g_free(tj->filename); g_free(tj); g_free(job); if (g_atomic_int_dec_and_test(&non_innodb_table_counter) && g_atomic_int_get(&non_innodb_done)) { g_async_queue_push(conf->unlock_tables, GINT_TO_POINTER(1)); } break; case JOB_SCHEMA: sj=(struct schema_job *)job->job_data; g_message("Thread %d dumping schema for `%s`.`%s`", td->thread_id, sj->database, sj->table); dump_schema_data(thrconn, sj->database, sj->table, sj->filename); if(sj->database) g_free(sj->database); if(sj->table) g_free(sj->table); if(sj->filename) g_free(sj->filename); g_free(sj); g_free(job); break; case JOB_BINLOG: thrconn= reconnect_for_binlog(thrconn); g_message("Thread %d connected using MySQL connection ID %lu (in binlog mode)", td->thread_id, mysql_thread_id(thrconn)); bj=(struct binlog_job *)job->job_data; g_message("Thread %d dumping binary log file %s", td->thread_id, bj->filename); get_binlog_file(thrconn, bj->filename, binlog_directory, bj->start_position, 
/* Tail of process_queue(): finish the JOB_BINLOG case, then the shutdown path. */
bj->stop_position, FALSE);
if(bj->filename) g_free(bj->filename);
g_free(bj);
g_free(job);
break;
case JOB_SHUTDOWN:
	g_message("Thread %d shutting down", td->thread_id);
	if (thrconn) mysql_close(thrconn);
	g_free(job);
	mysql_thread_end();
	/* Worker leaves here; the identical cleanup below the loop is effectively unreachable. */
	return NULL;
	break;
default:
	g_critical("Something very bad happened!");
	exit(EXIT_FAILURE);
}
}
if (thrconn) mysql_close(thrconn);
mysql_thread_end();
return NULL;
}

/*
 * Close any existing connection and open a fresh one for binlog streaming.
 * mysql_init() is serialized via init_mutex (it is not thread safe).
 * Sets a 1-second MYSQL_OPT_READ_TIMEOUT -- presumably so reads on the
 * binlog stream do not block forever; TODO(review) confirm intent.
 * Terminates the whole process if the reconnect fails.
 * Returns the newly initialized, connected handle.
 */
MYSQL *reconnect_for_binlog(MYSQL *thrconn) {
	if (thrconn) {
		mysql_close(thrconn);
	}
	g_mutex_lock(init_mutex);
	thrconn= mysql_init(NULL);
	g_mutex_unlock(init_mutex);
	if (compress_protocol)
		mysql_options(thrconn,MYSQL_OPT_COMPRESS,NULL);
	int timeout= 1;
	/* mysql_options takes the option value by pointer, hence the cast. */
	mysql_options(thrconn, MYSQL_OPT_READ_TIMEOUT, (const char*)&timeout);
	if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) {
		g_critical("Failed to re-connect to database: %s", mysql_error(thrconn));
		exit(EXIT_FAILURE);
	}
	return thrconn;
}

/*
 * Entry point: parse command-line options, print version on request,
 * set verbosity and create the (timestamped by default) output directory.
 * The function body continues past this chunk of the file.
 */
int main(int argc, char *argv[]) {
	GError *error = NULL;
	GOptionContext *context;
	g_thread_init(NULL);
	init_mutex = g_mutex_new();
	context = g_option_context_new("multi-threaded MySQL dumping");
	GOptionGroup *main_group= g_option_group_new("main", "Main Options", "Main Options", NULL, NULL);
	g_option_group_add_entries(main_group, entries);
	g_option_group_add_entries(main_group, common_entries);
	g_option_context_set_main_group(context, main_group);
	if (!g_option_context_parse(context, &argc, &argv, &error)) {
		g_print ("option parsing failed: %s, try --help\n", error->message);
		exit (EXIT_FAILURE);
	}
	g_option_context_free(context);
	if (program_version) {
		g_print("mydumper %s, built against MySQL %s\n", VERSION, MYSQL_SERVER_VERSION);
		exit (EXIT_SUCCESS);
	}
	set_verbose(verbose);
	time_t t;
	time(&t);localtime_r(&t,&tval);
	/* Default output dir: DIRECTORY-YYYYMMDD-HHMMSS */
	if (!output_directory)
		output_directory = g_strdup_printf("%s-%04d%02d%02d-%02d%02d%02d",DIRECTORY, tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, tval.tm_hour, tval.tm_min, tval.tm_sec);
	create_backup_dir(output_directory);
	if
(daemon_mode) { pid_t pid, sid; pid= fork(); if (pid < 0) exit(EXIT_FAILURE); else if (pid > 0) exit(EXIT_SUCCESS); umask(0); sid= setsid(); if (sid < 0) exit(EXIT_FAILURE); char *dump_directory= g_strdup_printf("%s/0", output_directory); create_backup_dir(dump_directory); g_free(dump_directory); dump_directory= g_strdup_printf("%s/1", output_directory); create_backup_dir(dump_directory); g_free(dump_directory); daemon_binlog_directory= g_strdup_printf("%s/%s", output_directory, DAEMON_BINLOGS); create_backup_dir(daemon_binlog_directory); } if (need_binlogs) { binlog_directory = g_strdup_printf("%s/%s", output_directory, BINLOG_DIRECTORY); create_backup_dir(binlog_directory); } /* Give ourselves an array of engines to ignore */ if (ignore_engines) ignore = g_strsplit(ignore_engines, ",", 0); /* Give ourselves an array of tables to dump */ if (tables_list) tables = g_strsplit(tables_list, ",", 0); if (daemon_mode) { GError* terror; GThread *bthread= g_thread_create(binlog_thread, GINT_TO_POINTER(1), FALSE, &terror); if (bthread == NULL) { g_critical("Could not create binlog thread: %s", terror->message); g_error_free(terror); exit(EXIT_FAILURE); } start_scheduled_dump= g_async_queue_new(); GThread *ethread= g_thread_create(exec_thread, GINT_TO_POINTER(1), FALSE, &terror); if (ethread == NULL) { g_critical("Could not create exec thread: %s", terror->message); g_error_free(terror); exit(EXIT_FAILURE); } // Run initial snapshot run_snapshot(NULL); #if GLIB_MINOR_VERSION < 14 g_timeout_add(snapshot_interval*60*1000, (GSourceFunc) run_snapshot, NULL); #else g_timeout_add_seconds(snapshot_interval*60, (GSourceFunc) run_snapshot, NULL); #endif guint sigsource= g_unix_signal_add(SIGINT, sig_triggered, NULL); sigsource= g_unix_signal_add(SIGTERM, sig_triggered, NULL); m1= g_main_loop_new(NULL, TRUE); g_main_loop_run(m1); g_source_remove(sigsource); } else { MYSQL *conn= create_main_connection(); start_dump(conn); mysql_close(conn); } sleep(5); mysql_thread_end(); 
mysql_library_end(); g_free(output_directory); g_strfreev(ignore); g_strfreev(tables); if (logoutfile) { fclose(logoutfile); } exit(errors ? EXIT_FAILURE : EXIT_SUCCESS); } MYSQL *create_main_connection() { MYSQL *conn; conn = mysql_init(NULL); mysql_options(conn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); if (!mysql_real_connect(conn, hostname, username, password, db, port, socket_path, 0)) { g_critical("Error connecting to database: %s", mysql_error(conn)); exit(EXIT_FAILURE); } if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(conn, "SET SESSION wait_timeout = 2147483")){ g_warning("Failed to increase wait_timeout: %s", mysql_error(conn)); } if ((detected_server == SERVER_TYPE_MYSQL) && mysql_query(conn, "SET SESSION net_write_timeout = 2147483")){ g_warning("Failed to increase net_write_timeout: %s", mysql_error(conn)); } detected_server= detect_server(conn); switch (detected_server) { case SERVER_TYPE_MYSQL: g_message("Connected to a MySQL server"); break; case SERVER_TYPE_DRIZZLE: g_message("Connected to a Drizzle server"); break; default: g_critical("Cannot detect server type"); exit(EXIT_FAILURE); break; } return conn; } void *exec_thread(void *data) { (void) data; while(1) { g_async_queue_pop(start_scheduled_dump); clear_dump_directory(); MYSQL *conn= create_main_connection(); start_dump(conn); mysql_close(conn); mysql_thread_end(); // Don't switch the symlink on shutdown because the dump is probably incomplete. if (!shutdown_triggered) { const char *dump_symlink_source= (dump_number == 0) ? "0" : "1"; char *dump_symlink_dest= g_strdup_printf("%s/last_dump", output_directory); // We don't care if this fails g_unlink(dump_symlink_dest); if (symlink(dump_symlink_source, dump_symlink_dest) == -1) { g_critical("error setting last good dump symlink %s, %d", dump_symlink_dest, errno); } g_free(dump_symlink_dest); dump_number= (dump_number == 1) ? 
0 : 1; } } return NULL; } void *binlog_thread(void *data) { (void) data; MYSQL_RES *master= NULL; MYSQL_ROW row; MYSQL *conn; conn = mysql_init(NULL); mysql_options(conn,MYSQL_READ_DEFAULT_GROUP,"mydumper"); if (!mysql_real_connect(conn, hostname, username, password, db, port, socket_path, 0)) { g_critical("Error connecting to database: %s", mysql_error(conn)); exit(EXIT_FAILURE); } mysql_query(conn,"SHOW MASTER STATUS"); master= mysql_store_result(conn); if (master && (row= mysql_fetch_row(master))) { MYSQL *binlog_connection= NULL; binlog_connection= reconnect_for_binlog(binlog_connection); binlog_connect_id= mysql_thread_id(binlog_connection); guint64 start_position= g_ascii_strtoull(row[1], NULL, 10); gchar* filename= g_strdup(row[0]); mysql_free_result(master); mysql_close(conn); g_message("Continuous binlog thread connected using MySQL connection ID %lu", mysql_thread_id(binlog_connection)); get_binlog_file(binlog_connection, filename, daemon_binlog_directory, start_position, 0, TRUE); g_free(filename); mysql_close(binlog_connection); } else { mysql_free_result(master); mysql_close(conn); } g_message("Continuous binlog thread shutdown"); mysql_thread_end(); return NULL; } void start_dump(MYSQL *conn) { struct configuration conf = { 1, NULL, NULL, NULL, NULL, 0 }; char *p; time_t t; if (daemon_mode) p= g_strdup_printf("%s/%d/metadata", output_directory, dump_number); else p= g_strdup_printf("%s/metadata", output_directory); FILE* mdfile=g_fopen(p,"w"); g_free(p); if(!mdfile) { g_critical("Couldn't write metadata file (%d)",errno); exit(EXIT_FAILURE); } /* We check SHOW PROCESSLIST, and if there're queries larger than preset value, we terminate the process. 
This avoids stalling whole server with flush */ if (mysql_query(conn, "SHOW PROCESSLIST")) { g_warning("Could not check PROCESSLIST, no long query guard enabled: %s", mysql_error(conn)); } else { MYSQL_RES *res = mysql_store_result(conn); MYSQL_ROW row; /* Just in case PROCESSLIST output column order changes */ MYSQL_FIELD *fields = mysql_fetch_fields(res); guint i; int tcol=-1, ccol=-1, icol=-1; for(i=0; ilongquery) { if (killqueries) { if (mysql_query(conn,p=g_strdup_printf("KILL %lu",atol(row[icol])))) g_warning("Could not KILL slow query: %s",mysql_error(conn)); else g_warning("Killed a query that was running for %ss",row[tcol]); g_free(p); } else { g_critical("There are queries in PROCESSLIST running longer than %us, aborting dump,\n\t" "use --long-query-guard to change the guard value, kill queries (--kill-long-queries) or use \n\tdifferent server for dump", longquery); exit(EXIT_FAILURE); } } } mysql_free_result(res); } if (!no_locks) { if (mysql_query(conn, "FLUSH TABLES WITH READ LOCK")) { g_critical("Couldn't acquire global lock, snapshots will not be consistent: %s",mysql_error(conn)); errors++; } } else { g_warning("Executing in no-locks mode, snapshot will notbe consistent"); } if (mysql_get_server_version(conn)) { mysql_query(conn, "CREATE TABLE IF NOT EXISTS mysql.mydumperdummy (a INT) ENGINE=INNODB"); need_dummy_read=1; } mysql_query(conn, "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */"); if (need_dummy_read) { mysql_query(conn,"SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.mydumperdummy"); MYSQL_RES *res=mysql_store_result(conn); if (res) mysql_free_result(res); } time(&t); localtime_r(&t,&tval); fprintf(mdfile,"Started dump at: %04d-%02d-%02d %02d:%02d:%02d\n", tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, tval.tm_hour, tval.tm_min, tval.tm_sec); g_message("Started dump at: %04d-%02d-%02d %02d:%02d:%02d\n", tval.tm_year+1900, tval.tm_mon+1, tval.tm_mday, tval.tm_hour, tval.tm_min, tval.tm_sec); if (detected_server == SERVER_TYPE_MYSQL) { 
mysql_query(conn, "/*!40101 SET NAMES binary*/"); write_snapshot_info(conn, mdfile); } conf.queue = g_async_queue_new(); conf.ready = g_async_queue_new(); conf.unlock_tables= g_async_queue_new(); guint n; GThread **threads = g_new(GThread*,num_threads); struct thread_data *td= g_new(struct thread_data, num_threads); for (n=0; ndata; dump_table(conn, dbt->database, dbt->table, &conf, FALSE); g_atomic_int_inc(&non_innodb_table_counter); } g_list_free(g_list_first(non_innodb_table)); g_atomic_int_inc(&non_innodb_done); for (innodb_tables= g_list_first(innodb_tables); innodb_tables; innodb_tables= g_list_next(innodb_tables)) { dbt= (struct db_table*) innodb_tables->data; dump_table(conn, dbt->database, dbt->table, &conf, TRUE); } g_list_free(g_list_first(innodb_tables)); for (table_schemas= g_list_first(table_schemas); table_schemas; table_schemas= g_list_next(table_schemas)) { dbt= (struct db_table*) table_schemas->data; dump_schema(dbt->database, dbt->table, &conf); g_free(dbt->table); g_free(dbt->database); g_free(dbt); } g_list_free(g_list_first(table_schemas)); if (need_binlogs) { get_binlogs(conn, &conf); } for (n=0; ntype = JOB_SHUTDOWN; g_async_queue_push(conf.queue,j); } g_async_queue_pop(conf.unlock_tables); if (!no_locks) { g_message("Non-InnoDB dump complete, unlocking tables"); mysql_query(conn, "UNLOCK TABLES"); } for (n=0; nuse_any_index) { guint64 max_cardinality=0; guint64 cardinality=0; mysql_data_seek(indexes,0); while ((row=mysql_fetch_row(indexes))) { if(!strcmp(row[3],"1")) { if (row[6]) cardinality = strtoll(row[6],NULL,10); if (cardinality>max_cardinality) { field=row[4]; max_cardinality=cardinality; } } } } /* Oh well, no chunks today - no suitable index */ if (!field) goto cleanup; /* Get minimum/maximum */ mysql_query(conn, query=g_strdup_printf("SELECT %s MIN(`%s`),MAX(`%s`) FROM `%s`.`%s`", (detected_server == SERVER_TYPE_MYSQL) ? 
"/*!40001 SQL_NO_CACHE */" : "", field, field, database, table)); g_free(query); minmax=mysql_store_result(conn); if (!minmax) goto cleanup; row=mysql_fetch_row(minmax); MYSQL_FIELD * fields=mysql_fetch_fields(minmax); char *min=row[0]; char *max=row[1]; /* Got total number of rows, skip chunk logic if estimates are low */ guint64 rows = estimate_count(conn, database, table, field, NULL, NULL); if (rows <= rows_per_file) goto cleanup; /* This is estimate, not to use as guarantee! Every chunk would have eventual adjustments */ guint64 estimated_chunks = rows / rows_per_file; guint64 estimated_step, nmin, nmax, cutoff; /* Support just bigger INTs for now, very dumb, no verify approach */ switch (fields[0].type) { case MYSQL_TYPE_LONG: case MYSQL_TYPE_LONGLONG: case MYSQL_TYPE_INT24: /* static stepping */ nmin = strtoll(min,NULL,10); nmax = strtoll(max,NULL,10); estimated_step = (nmax-nmin)/estimated_chunks+1; cutoff = nmin; while(cutoff<=nmax) { chunks=g_list_append(chunks,g_strdup_printf("%s%s(`%s` >= %llu AND `%s` < %llu)", !showed_nulls?field:"", !showed_nulls?" 
IS NULL OR ":"", field, (unsigned long long)cutoff, field, (unsigned long long)(cutoff+estimated_step))); cutoff+=estimated_step; showed_nulls=1; } default: goto cleanup; } cleanup: if (indexes) mysql_free_result(indexes); if (minmax) mysql_free_result(minmax); if (total) mysql_free_result(total); return chunks; } /* Try to get EXPLAIN'ed estimates of row in resultset */ guint64 estimate_count(MYSQL *conn, char *database, char *table, char *field, char *from, char *to) { char *querybase, *query; int ret; g_assert(conn && database && table); querybase = g_strdup_printf("EXPLAIN SELECT `%s` FROM `%s`.`%s`", (field?field:"*"), database, table); if (from || to) { g_assert(field != NULL); char *fromclause=NULL, *toclause=NULL; char *escaped; if (from) { escaped=g_new(char,strlen(from)*2+1); mysql_real_escape_string(conn,escaped,from,strlen(from)); fromclause = g_strdup_printf(" `%s` >= \"%s\" ", field, escaped); g_free(escaped); } if (to) { escaped=g_new(char,strlen(to)*2+1); mysql_real_escape_string(conn,escaped,from,strlen(from)); toclause = g_strdup_printf( " `%s` <= \"%s\"", field, escaped); g_free(escaped); } query = g_strdup_printf("%s WHERE `%s` %s %s", querybase, (from?fromclause:""), ((from&&to)?"AND":""), (to?toclause:"")); if (toclause) g_free(toclause); if (fromclause) g_free(fromclause); ret=mysql_query(conn,query); g_free(querybase); g_free(query); } else { ret=mysql_query(conn,querybase); g_free(querybase); } if (ret) { g_warning("Unable to get estimates for %s.%s: %s",database,table,mysql_error(conn)); } MYSQL_RES * result = mysql_store_result(conn); MYSQL_FIELD * fields = mysql_fetch_fields(result); guint i; for (i=0; i1 kicks in only in case of 5.0 SHOW FULL TABLES or SHOW TABLE STATUS row[1] == NULL if it is a view in 5.0 'SHOW TABLE STATUS' row[1] == "VIEW" if it is a view in 5.0 'SHOW FULL TABLES' */ if ((detected_server == SERVER_TYPE_MYSQL) && ( row[ccol] == NULL || !strcmp(row[ccol],"VIEW") )) continue; /* Skip ignored engines, handy for avoiding 
Merge, Federated or Blackhole :-) dumps */ if (ignore) { for (i = 0; ignore[i] != NULL; i++) { if (g_ascii_strcasecmp(ignore[i], row[ecol]) == 0) { dump = 0; break; } } } if (!dump) continue; /* In case of table-list option is enabled, check if table is part of the list */ if (tables) { int table_found=0; for (i = 0; tables[i] != NULL; i++) if (g_ascii_strcasecmp(tables[i], row[0]) == 0) table_found = 1; if (!table_found) dump = 0; } if (!dump) continue; /* Checks PCRE expressions on 'database.table' string */ if (regexstring && !check_regex(database,row[0])) continue; /* Green light! */ struct db_table *dbt = g_new(struct db_table, 1); dbt->database= g_strdup(database); dbt->table= g_strdup(row[0]); if (!g_ascii_strcasecmp("InnoDB", row[ecol])) { innodb_tables= g_list_append(innodb_tables, dbt); } else { non_innodb_table= g_list_append(non_innodb_table, dbt); } if (!no_schemas) { table_schemas= g_list_append(table_schemas, dbt); } } mysql_free_result(result); } void dump_schema_data(MYSQL *conn, char *database, char *table, char *filename) { void *outfile; char *query = NULL; MYSQL_RES *result = NULL; MYSQL_ROW row; if (!compress_output) outfile= g_fopen(filename, "w"); else outfile= (void*) gzopen(filename, "w"); if (!outfile) { g_critical("Error: DB: %s Could not create output file %s (%d)", database, filename, errno); errors++; return; } GString* statement = g_string_sized_new(statement_size); if (detected_server == SERVER_TYPE_MYSQL) { g_string_printf(statement,"/*!40101 SET NAMES binary*/;\n"); g_string_append(statement,"/*!40014 SET FOREIGN_KEY_CHECKS=0*/;\n\n"); } else { g_string_printf(statement, "SET FOREIGN_KEY_CHECKS=0;\n"); } if (!write_data((FILE *)outfile,statement)) { g_critical("Could not write schema data for %s.%s", database, table); errors++; return; } query= g_strdup_printf("SHOW CREATE TABLE `%s`.`%s`", database, table); if (mysql_query(conn, query) || !(result= mysql_use_result(conn))) { g_critical("Error dumping schemas (%s.%s): %s", 
/* Tail of dump_schema_data(): write the CREATE TABLE text and release everything. */
database, table, mysql_error(conn));
	g_free(query);
	errors++;
	return;
}
g_string_set_size(statement, 0);

/* There should never be more than one row */
row = mysql_fetch_row(result);
g_string_append(statement, row[1]);
g_string_append(statement, ";\n");
if (!write_data((FILE *)outfile, statement)) {
	g_critical("Could not write schema for %s.%s", database, table);
	errors++;
}
g_free(query);

if (!compress_output)
	fclose((FILE *)outfile);
else
	gzclose((gzFile)outfile);
g_string_free(statement, TRUE);
if (result)
	mysql_free_result(result);
return;
}

/*
 * Dump one table (or chunk) into `filename`, plain or gzip depending on
 * compress_output. If the dump produced zero rows and --build-empty-files
 * is not set, the (useless) file is removed again. Errors only bump the
 * global `errors` counter; the caller is not notified directly.
 */
void dump_table_data_file(MYSQL *conn, char *database, char *table, char *where, char *filename) {
	void *outfile;

	if (!compress_output)
		outfile = g_fopen(filename, "w");
	else
		outfile = (void*) gzopen(filename, "w");

	if (!outfile) {
		g_critical("Error: DB: %s TABLE: %s Could not create output file %s (%d)", database, table, filename, errno);
		errors++;
		return;
	}
	guint64 rows_count = dump_table_data(conn, (FILE *)outfile, database, table, where);
	if (!compress_output)
		fclose((FILE *)outfile);
	else
		gzclose((gzFile)outfile);
	if (!rows_count && !build_empty_files) {
		// dropping the useless file
		if (remove(filename)) {
			g_warning("failed to remove empty file : %s\n", filename);
			return;
		}
	}
}

/*
 * Queue a JOB_SCHEMA job for `database`.`table` on the worker queue.
 * The output filename gains a /N/ dump-number component in daemon mode
 * and a .gz suffix when compression is enabled.
 */
void dump_schema(char *database, char *table, struct configuration *conf) {
	struct job *j = g_new0(struct job,1);
	struct schema_job *sj = g_new0(struct schema_job,1);
	j->job_data=(void*) sj;
	sj->database=g_strdup(database);
	sj->table=g_strdup(table);
	j->conf=conf;
	j->type=JOB_SCHEMA;

	if (daemon_mode)
		sj->filename = g_strdup_printf("%s/%d/%s.%s-schema.sql%s", output_directory, dump_number, database, table, (compress_output?".gz":""));
	else
		sj->filename = g_strdup_printf("%s/%s.%s-schema.sql%s", output_directory, database, table, (compress_output?".gz":""));
	g_async_queue_push(conf->queue,j);
	return;
}

/*
 * Queue data-dump jobs for one table; chunked when rows_per_file is set.
 * The function body continues past this chunk of the file.
 */
void dump_table(MYSQL *conn, char *database, char *table, struct configuration *conf, gboolean is_innodb) {
	GList * chunks = NULL;
	if
(rows_per_file) chunks = get_chunks_for_table(conn, database, table, conf); if (chunks) { int nchunk=0; for (chunks = g_list_first(chunks); chunks; chunks=g_list_next(chunks)) { struct job *j = g_new0(struct job,1); struct table_job *tj = g_new0(struct table_job,1); j->job_data=(void*) tj; tj->database=g_strdup(database); tj->table=g_strdup(table); j->conf=conf; j->type= is_innodb ? JOB_DUMP : JOB_DUMP_NON_INNODB; if (daemon_mode) tj->filename=g_strdup_printf("%s/%d/%s.%s.%05d.sql%s", output_directory, dump_number, database, table, nchunk,(compress_output?".gz":"")); else tj->filename=g_strdup_printf("%s/%s.%s.%05d.sql%s", output_directory, database, table, nchunk,(compress_output?".gz":"")); tj->where=(char *)chunks->data; g_async_queue_push(conf->queue,j); nchunk++; } g_list_free(g_list_first(chunks)); } else { struct job *j = g_new0(struct job,1); struct table_job *tj = g_new0(struct table_job,1); j->job_data=(void*) tj; tj->database=g_strdup(database); tj->table=g_strdup(table); j->conf=conf; j->type= is_innodb ? 
JOB_DUMP : JOB_DUMP_NON_INNODB; if (daemon_mode) tj->filename = g_strdup_printf("%s/%d/%s.%s.sql%s", output_directory, dump_number, database, table,(compress_output?".gz":"")); else tj->filename = g_strdup_printf("%s/%s.%s.sql%s", output_directory, database, table,(compress_output?".gz":"")); g_async_queue_push(conf->queue,j); return; } } /* Do actual data chunk reading/writing magic */ guint64 dump_table_data(MYSQL * conn, FILE *file, char *database, char *table, char *where) { guint i; guint num_fields = 0; guint64 num_rows = 0; MYSQL_RES *result = NULL; char *query = NULL; /* Ghm, not sure if this should be statement_size - but default isn't too big for now */ GString* statement = g_string_sized_new(statement_size); if (detected_server == SERVER_TYPE_MYSQL) { g_string_printf(statement,"/*!40101 SET NAMES binary*/;\n"); g_string_append(statement,"/*!40014 SET FOREIGN_KEY_CHECKS=0*/;\n"); } else { g_string_printf(statement,"SET FOREIGN_KEY_CHECKS=0;\n"); } if (!write_data(file,statement)) { g_critical("Could not write out data for %s.%s", database, table); return num_rows; } /* Poor man's database code */ query = g_strdup_printf("SELECT %s * FROM `%s`.`%s` %s %s", (detected_server == SERVER_TYPE_MYSQL) ? 
"/*!40001 SQL_NO_CACHE */" : "", database, table, where?"WHERE":"",where?where:""); if (mysql_query(conn, query) || !(result=mysql_use_result(conn))) { g_critical("Error dumping table (%s.%s) data: %s ",database, table, mysql_error(conn)); g_free(query); errors++; return num_rows; } num_fields = mysql_num_fields(result); MYSQL_FIELD *fields = mysql_fetch_fields(result); /* Buffer for escaping field values */ GString *escaped = g_string_sized_new(3000); MYSQL_ROW row; g_string_set_size(statement,0); /* Poor man's data dump code */ while ((row = mysql_fetch_row(result))) { gulong *lengths = mysql_fetch_lengths(result); num_rows++; if (!statement->len) g_string_printf(statement, "INSERT INTO `%s` VALUES\n(", table); else g_string_append(statement, ",\n("); for (i = 0; i < num_fields; i++) { /* Don't escape safe formats, saves some time */ if (!row[i]) { g_string_append(statement, "NULL"); } else if (fields[i].flags & NUM_FLAG) { g_string_append(statement, row[i]); } else { /* We reuse buffers for string escaping, growing is expensive just at the beginning */ g_string_set_size(escaped, lengths[i]*2+1); mysql_real_escape_string(conn, escaped->str, row[i], lengths[i]); g_string_append_c(statement,'\"'); g_string_append(statement,escaped->str); g_string_append_c(statement,'\"'); } if (i < num_fields - 1) { g_string_append_c(statement,','); } else { /* INSERT statement is closed once over limit */ if (statement->len > statement_size) { g_string_append(statement,");\n"); if (!write_data(file,statement)) { g_critical("Could not write out data for %s.%s", database, table); goto cleanup; } g_string_set_size(statement,0); } else { g_string_append_c(statement,')'); } } } } if (mysql_errno(conn)) { g_critical("Could not read data from %s.%s: %s", database, table, mysql_error(conn)); } if (statement->len > 0) { if (!write_data(file,statement)) { g_critical("Could not write out data for %s.%s", database, table); goto cleanup; } g_string_printf(statement,";\n"); if 
(!write_data(file,statement)) { g_critical("Could not write out closing newline for %s.%s, now this is sad!", database, table); goto cleanup; } } cleanup: g_free(query); g_string_free(escaped,TRUE); g_string_free(statement,TRUE); if (result) { mysql_free_result(result); } return num_rows; } gboolean write_data(FILE* file,GString * data) { size_t written= 0; ssize_t r= 0; while (written < data->len) { if (!compress_output) r = write(fileno(file), data->str + written, data->len); else r = gzwrite((gzFile)file, data->str + written, data->len); if (r < 0) { g_critical("Couldn't write data to a file: %s", strerror(errno)); errors++; return FALSE; } written += r; } return TRUE; } void write_log_file(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { (void) log_domain; (void) user_data; gchar date[20]; time_t rawtime; struct tm timeinfo; time(&rawtime); localtime_r(&rawtime, &timeinfo); strftime(date, 20, "%Y-%m-%d %H:%M:%S", &timeinfo); GString* message_out = g_string_new(date); if (log_level & G_LOG_LEVEL_DEBUG) { g_string_append(message_out, " [DEBUG] - "); } else if ((log_level & G_LOG_LEVEL_INFO) || (log_level & G_LOG_LEVEL_MESSAGE)) { g_string_append(message_out, " [INFO] - "); } else if (log_level & G_LOG_LEVEL_WARNING) { g_string_append(message_out, " [WARNING] - "); } else if ((log_level & G_LOG_LEVEL_ERROR) || (log_level & G_LOG_LEVEL_CRITICAL)) { g_string_append(message_out, " [ERROR] - "); } g_string_append_printf(message_out, "%s\n", message); if (write(fileno(logoutfile), message_out->str, message_out->len) <= 0) { fprintf(stderr, "Cannot write to log file with error %d. 
Exiting...", errno); } g_string_free(message_out, TRUE); } mydumper-0.5.2/mydumper.h0000644000000000000000000000323212052470045013520 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Authors: Domas Mituzas, Facebook ( domas at fb dot com ) Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #ifndef _mydumper_h #define _mydumper_h enum job_type { JOB_SHUTDOWN, JOB_RESTORE, JOB_DUMP, JOB_DUMP_NON_INNODB, JOB_SCHEMA, JOB_BINLOG }; struct configuration { char use_any_index; GAsyncQueue* queue; GAsyncQueue* ready; GAsyncQueue* unlock_tables; GMutex* mutex; int done; }; struct thread_data { struct configuration *conf; guint thread_id; }; struct job { enum job_type type; void *job_data; struct configuration *conf; }; struct table_job { char *database; char *table; char *filename; char *where; }; struct schema_job { char *database; char *table; char *filename; }; struct restore_job { char *database; char *table; char *filename; }; struct binlog_job { char *filename; guint64 start_position; guint64 stop_position; }; struct db_table { char* database; char* table; }; #endif mydumper-0.5.2/myloader.c0000644000000000000000000003047312052470045013474 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the 
License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #define _LARGEFILE64_SOURCE #define _FILE_OFFSET_BITS 64 #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "myloader.h" #include "config.h" guint commit_count= 1000; gchar *directory= NULL; gboolean overwrite_tables= FALSE; gboolean enable_binlog= FALSE; static GMutex *init_mutex= NULL; guint errors= 0; gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof); void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema); void *process_queue(struct thread_data *td); void add_table(const gchar* filename, struct configuration *conf); void add_schema(const gchar* filename, MYSQL *conn); void restore_databases(struct configuration *conf, MYSQL *conn); void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); void set_verbose(guint verbosity); static GOptionEntry entries[] = { { "directory", 'd', 0, G_OPTION_ARG_STRING, &directory, "Directory of the dump to import", NULL }, { "queries-per-transaction", 'q', 0, G_OPTION_ARG_INT, &commit_count, "Number of queries per transaction, default 1000", NULL }, { "overwrite-tables", 'o', 0, G_OPTION_ARG_NONE, &overwrite_tables, "Drop tables if they already exist", NULL }, { "database", 'B', 0, G_OPTION_ARG_STRING, &db, "An alternative database to restore into", NULL }, { "enable-binlog", 'e', 0, G_OPTION_ARG_NONE, &enable_binlog, "Enable binary logging of the restore data", NULL }, { NULL, 0, 0, 
G_OPTION_ARG_NONE, NULL, NULL, NULL } }; void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { (void) log_domain; (void) log_level; (void) message; (void) user_data; } void set_verbose(guint verbosity) { switch (verbosity) { case 0: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), no_log, NULL); break; case 1: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_MESSAGE), no_log, NULL); break; case 2: g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MESSAGE), no_log, NULL); break; default: break; } } int main(int argc, char *argv[]) { struct configuration conf= { NULL, NULL, NULL, 0 }; GError *error= NULL; GOptionContext *context; g_thread_init(NULL); init_mutex= g_mutex_new(); context= g_option_context_new("multi-threaded MySQL loader"); GOptionGroup *main_group= g_option_group_new("main", "Main Options", "Main Options", NULL, NULL); g_option_group_add_entries(main_group, entries); g_option_group_add_entries(main_group, common_entries); g_option_context_set_main_group(context, main_group); if (!g_option_context_parse(context, &argc, &argv, &error)) { g_print("option parsing failed: %s, try --help\n", error->message); exit(EXIT_FAILURE); } g_option_context_free(context); if (program_version) { g_print("myloader %s, built against MySQL %s\n", VERSION, MYSQL_SERVER_VERSION); exit(EXIT_SUCCESS); } set_verbose(verbose); if (!directory) { g_critical("a directory needs to be specified, see --help\n"); exit(EXIT_FAILURE); } else { char *p= g_strdup_printf("%s/metadata", directory); if (!g_file_test(p, G_FILE_TEST_EXISTS)) { g_critical("the specified directory is not a mydumper backup\n"); exit(EXIT_FAILURE); } } MYSQL *conn; conn= mysql_init(NULL); mysql_options(conn, MYSQL_READ_DEFAULT_GROUP, "myloader"); if (!mysql_real_connect(conn, hostname, username, password, NULL, port, socket_path, 0)) { g_critical("Error connection to database: %s", mysql_error(conn)); exit(EXIT_FAILURE); } 
if (!enable_binlog) mysql_query(conn, "SET SQL_LOG_BIN=0"); mysql_query(conn, "/*!40014 SET FOREIGN_KEY_CHECKS=0*/"); conf.queue= g_async_queue_new(); conf.ready= g_async_queue_new(); guint n; GThread **threads= g_new(GThread*, num_threads); struct thread_data *td= g_new(struct thread_data, num_threads); for (n= 0; n < num_threads; n++) { td[n].conf= &conf; td[n].thread_id= n+1; threads[n]= g_thread_create((GThreadFunc)process_queue, &td[n], TRUE, NULL); g_async_queue_pop(conf.ready); } g_async_queue_unref(conf.ready); g_message("%d threads created", num_threads); restore_databases(&conf, conn); for (n= 0; n < num_threads; n++) { struct job *j= g_new0(struct job, 1); j->type = JOB_SHUTDOWN; g_async_queue_push(conf.queue, j); } for (n= 0; n < num_threads; n++) { g_thread_join(threads[n]); } g_async_queue_unref(conf.queue); mysql_close(conn); mysql_thread_end(); mysql_library_end(); g_free(directory); g_free(td); g_free(threads); return errors ? EXIT_FAILURE : EXIT_SUCCESS; } void restore_databases(struct configuration *conf, MYSQL *conn) { GError *error= NULL; GDir* dir= g_dir_open(directory, 0, &error); if (error) { g_critical("cannot open directory %s, %s\n", directory, error->message); errors++; return; } const gchar* filename= NULL; while((filename= g_dir_read_name(dir))) { if (g_strrstr(filename, "-schema.sql")) { add_schema(filename, conn); } } g_dir_rewind(dir); while((filename= g_dir_read_name(dir))) { if (!g_strrstr(filename, "-schema.sql") && g_strrstr(filename, ".sql")) { add_table(filename, conf); } } g_dir_close(dir); } void add_schema(const gchar* filename, MYSQL *conn) { // 0 is database, 1 is table with -schema on the end gchar** split_file= g_strsplit(filename, ".", 0); gchar* database= split_file[0]; // Remove the -schema from the table name gchar** split_table= g_strsplit(split_file[1], "-", 0); gchar* table= split_table[0]; gchar* query= g_strdup_printf("SHOW CREATE DATABASE `%s`", db ? 
db : database); if (mysql_query(conn, query)) { g_free(query); g_message("Creating database `%s`", db ? db : database); query= g_strdup_printf("CREATE DATABASE `%s`", db ? db : database); mysql_query(conn, query); } else { MYSQL_RES *result= mysql_store_result(conn); // In drizzle the query succeeds with no rows my_ulonglong row_count= mysql_num_rows(result); mysql_free_result(result); if (row_count == 0) { // TODO: Move this to a function, it is the same as above g_free(query); g_message("Creating database `%s`", db ? db : database); query= g_strdup_printf("CREATE DATABASE `%s`", db ? db : database); mysql_query(conn, query); } } g_free(query); if (overwrite_tables) { g_message("Dropping table (if exists) `%s`.`%s`", db ? db : database, table); query= g_strdup_printf("DROP TABLE IF EXISTS `%s`.`%s`", db ? db : database, table); mysql_query(conn, query); g_free(query); } g_message("Creating table `%s`.`%s`", db ? db : database, table); restore_data(conn, database, table, filename, TRUE); g_strfreev(split_table); g_strfreev(split_file); return; } void add_table(const gchar* filename, struct configuration *conf) { struct job *j= g_new0(struct job, 1); struct restore_job *rj= g_new(struct restore_job, 1); j->job_data= (void*) rj; rj->filename= g_strdup(filename); j->type= JOB_RESTORE; gchar** split_file= g_strsplit(filename, ".", 0); rj->database= g_strdup(split_file[0]); rj->table= g_strdup(split_file[1]); rj->part= g_ascii_strtoull(split_file[2], NULL, 10); g_async_queue_push(conf->queue, j); return; } void *process_queue(struct thread_data *td) { struct configuration *conf= td->conf; g_mutex_lock(init_mutex); MYSQL *thrconn= mysql_init(NULL); g_mutex_unlock(init_mutex); mysql_options(thrconn, MYSQL_READ_DEFAULT_GROUP, "myloader"); if (compress_protocol) mysql_options(thrconn, MYSQL_OPT_COMPRESS, NULL); if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) { g_critical("Failed to connect to MySQL server: %s", 
mysql_error(thrconn)); exit(EXIT_FAILURE); } if (!enable_binlog) mysql_query(thrconn, "SET SQL_LOG_BIN=0"); mysql_query(thrconn, "/*!40101 SET NAMES binary*/"); mysql_query(thrconn, "SET autocommit=0"); g_async_queue_push(conf->ready, GINT_TO_POINTER(1)); struct job* job= NULL; struct restore_job* rj= NULL; for(;;) { job= (struct job*)g_async_queue_pop(conf->queue); switch (job->type) { case JOB_RESTORE: rj= (struct restore_job *)job->job_data; g_message("Thread %d restoring `%s`.`%s` part %d", td->thread_id, rj->database, rj->table, rj->part); restore_data(thrconn, rj->database, rj->table, rj->filename, FALSE); if (rj->database) g_free(rj->database); if (rj->table) g_free(rj->table); if (rj->filename) g_free(rj->filename); g_free(rj); g_free(job); break; case JOB_SHUTDOWN: g_message("Thread %d shutting down", td->thread_id); if (thrconn) mysql_close(thrconn); g_free(job); mysql_thread_end(); return NULL; break; default: g_critical("Something very bad happened!"); exit(EXIT_FAILURE); } } if (thrconn) mysql_close(thrconn); mysql_thread_end(); return NULL; } void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema) { void *infile; gboolean is_compressed= FALSE; gboolean eof= FALSE; guint query_counter= 0; GString *data= g_string_sized_new(512); gchar* path= g_build_filename(directory, filename, NULL); if (!g_str_has_suffix(path, ".gz")) { infile= g_fopen(path, "r"); is_compressed= FALSE; } else { infile= (void*) gzopen(path, "r"); is_compressed= TRUE; } if (!infile) { g_critical("cannot open file %s (%d)", filename, errno); errors++; return; } gchar *query= g_strdup_printf("USE `%s`", db ? db : database); if (mysql_query(conn, query)) { g_critical("Error switching to database %s whilst restoring table %s", db ? 
db : database, table); g_free(query); errors++; return; } g_free(query); if (!is_schema) mysql_query(conn, "START TRANSACTION"); while (eof == FALSE) { if (read_data(infile, is_compressed, data, &eof)) { // Search for ; in last 5 chars of line if (g_strrstr(&data->str[data->len >= 5 ? data->len - 5 : 0], ";\n")) { if (mysql_real_query(conn, data->str, data->len)) { g_critical("Error restoring %s.%s from file %s: %s", db ? db : database, table, filename, mysql_error(conn)); errors++; return; } query_counter++; if (!is_schema &&(query_counter == commit_count)) { query_counter= 0; if (mysql_query(conn, "COMMIT")) { g_critical("Error committing data for %s.%s: %s", db ? db : database, table, mysql_error(conn)); errors++; return; } mysql_query(conn, "START TRANSACTION"); } g_string_set_size(data, 0); } } else { g_critical("error reading file %s (%d)", filename, errno); errors++; return; } } if (!is_schema && mysql_query(conn, "COMMIT")) { g_critical("Error committing data for %s.%s from file %s: %s", db ? 
db : database, table, filename, mysql_error(conn)); errors++; } g_string_free(data, TRUE); g_free(path); if (!is_compressed) { fclose(infile); } else { gzclose((gzFile)infile); } return; } gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof) { char buffer[256]; do { if (!is_compressed) { if (fgets(buffer, 256, file) == NULL) { if (feof(file)) { *eof= TRUE; buffer[0]= '\0'; } else { return FALSE; } } } else { if (!gzgets((gzFile)file, buffer, 256)) { if (gzeof((gzFile)file)) { *eof= TRUE; buffer[0]= '\0'; } else { return FALSE; } } } g_string_append(data, buffer); } while ((buffer[strlen(buffer)] != '\0') && *eof == FALSE); return TRUE; } mydumper-0.5.2/myloader.h0000644000000000000000000000240412052470045013472 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Domas Mituzas, Facebook ( domas at fb dot com ) Mark Leith, Oracle Corporation (mark dot leith at oracle dot com) Andrew Hutchings, SkySQL (andrew at skysql dot com) */
#ifndef _myloader_h
#define _myloader_h

/* Kind of work item carried on the job queue: JOB_RESTORE restores one
 * dump file; JOB_SHUTDOWN tells a worker thread to exit. */
enum job_type { JOB_SHUTDOWN, JOB_RESTORE };

/* State shared between the main thread and the worker threads. */
struct configuration {
  GAsyncQueue* queue; /* jobs (struct job*) waiting for a worker */
  GAsyncQueue* ready; /* start-up handshake: each worker pushes one token once connected */
  GMutex* mutex;      /* NOTE(review): not used by the code visible in this file - confirm before relying on it */
  int done;           /* NOTE(review): likewise appears unused in the visible code */
};

/* Per-worker context handed to process_queue(). */
struct thread_data {
  struct configuration *conf; /* shared queues */
  guint thread_id;            /* 1-based id, used only in log messages */
};

/* One unit of work; job_data points to a struct restore_job when
 * type == JOB_RESTORE and is unused for JOB_SHUTDOWN. */
struct job {
  enum job_type type;
  void *job_data;
  struct configuration *conf; /* NOTE(review): not assigned by the visible callers - confirm */
};

/* Description of a single dump file to restore, parsed from its
 * dot-separated filename (database.table[.chunk].sql). */
struct restore_job {
  char *database; /* first filename component */
  char *table;    /* second filename component */
  char *filename; /* file name relative to the dump directory */
  guint part;     /* chunk number from the third component (0 when not chunked) */
};

#endif
mydumper-0.5.2/server_detect.c0000644000000000000000000000314212052470045014507 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see .
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #include #include #include #include "server_detect.h" int detect_server(MYSQL *conn) { pcre *re= NULL; const char *error; int erroroffset; int ovector[9]= {0}; int rc; const char* db_version= mysql_get_server_info(conn); re= pcre_compile(DETECT_MYSQL_REGEX, 0, &error, &erroroffset, NULL); if (!re) { g_critical("Regular expression fail: %s", error); exit(EXIT_FAILURE); } rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9); pcre_free(re); if (rc > 0) { return SERVER_TYPE_MYSQL; } re= pcre_compile(DETECT_DRIZZLE_REGEX, 0, &error, &erroroffset, NULL); if (!re) { g_critical("Regular expression fail: %s", error); exit(EXIT_FAILURE); } rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9); pcre_free(re); if (rc > 0) { return SERVER_TYPE_DRIZZLE; } return SERVER_TYPE_UNKNOWN; } mydumper-0.5.2/server_detect.h0000644000000000000000000000203512052470045014514 0ustar 00000000000000/* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com) */ #ifndef _server_detect_h #define _server_detect_h #include #define DETECT_MYSQL_REGEX "^([3-9]\\.[0-9]+\\.[0-9]+)" #define DETECT_DRIZZLE_REGEX "^(20[0-9]{2}\\.(0[1-9]|1[012])\\.[0-9]+)" enum server_type { SERVER_TYPE_UNKNOWN, SERVER_TYPE_MYSQL, SERVER_TYPE_DRIZZLE }; int detect_server(MYSQL *conn); #endif mydumper-0.5.2/cmake/modules/0000755000000000000000000000000012052470045014235 5ustar 00000000000000mydumper-0.5.2/cmake/modules/CppcheckTargets.cmake0000644000000000000000000001364212052470045020317 0ustar 00000000000000# - Run cppcheck on c++ source files as a custom target and a test # # include(CppcheckTargets) # add_cppcheck( [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) - # Create a target to check a target's sources with cppcheck and the indicated options # add_cppcheck_sources( [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) - # Create a target to check standalone sources with cppcheck and the indicated options # # Requires these CMake modules: # Findcppcheck # # Requires CMake 2.6 or newer (uses the 'function' command) # # Original Author: # 2009-2010 Ryan Pavlik # http://academic.cleardefinition.com # Iowa State University HCI Graduate Program/VRAC # # Copyright Iowa State University 2009-2010. # Distributed under the Boost Software License, Version 1.0. 
# (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) if(__add_cppcheck) return() endif() set(__add_cppcheck YES) if(NOT CPPCHECK_FOUND) find_package(cppcheck QUIET) endif() if(CPPCHECK_FOUND) if(NOT TARGET all_cppcheck) add_custom_target(all_cppcheck) set_target_properties(all_cppcheck PROPERTIES EXCLUDE_FROM_ALL TRUE) endif() endif() function(add_cppcheck_sources _targetname) if(CPPCHECK_FOUND) set(_cppcheck_args) set(_input ${ARGN}) list(FIND _input UNUSED_FUNCTIONS _unused_func) if("${_unused_func}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG}) list(REMOVE_AT _input ${_unused_func}) endif() list(FIND _input STYLE _style) if("${_style}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG}) list(REMOVE_AT _input ${_style}) endif() list(FIND _input POSSIBLE_ERROR _poss_err) if("${_poss_err}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG}) list(REMOVE_AT _input ${_poss_err}) endif() list(FIND _input FAIL_ON_WARNINGS _fail_on_warn) if("${_fail_on_warn}" GREATER "-1") list(APPEND CPPCHECK_FAIL_REGULAR_EXPRESSION ${CPPCHECK_WARN_REGULAR_EXPRESSION}) list(REMOVE_AT _input ${_fail_on_warn}) endif() set(_files) foreach(_source ${_input}) get_source_file_property(_cppcheck_loc "${_source}" LOCATION) if(_cppcheck_loc) # This file has a source file property, carry on. get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE) if("${_cppcheck_lang}" MATCHES "C") list(APPEND _files "${_cppcheck_loc}") endif() else() # This file doesn't have source file properties - figure it out. 
get_filename_component(_cppcheck_loc "${_source}" ABSOLUTE) if(EXISTS "${_cppcheck_loc}") list(APPEND _files "${_cppcheck_loc}") else() message(FATAL_ERROR "Adding CPPCHECK for file target ${_targetname}: " "File ${_source} does not exist or needs a corrected path location " "since we think its absolute path is ${_cppcheck_loc}") endif() endif() endforeach() if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0") # Older than CMake 2.8.0 add_test(${_targetname}_cppcheck_test "${CPPCHECK_EXECUTABLE}" ${CPPCHECK_TEMPLATE_ARG} ${_cppcheck_args} ${_files}) else() # CMake 2.8.0 and newer add_test(NAME ${_targetname}_cppcheck_test COMMAND "${CPPCHECK_EXECUTABLE}" ${CPPCHECK_TEMPLATE_ARG} ${_cppcheck_args} ${_files}) endif() set_tests_properties(${_targetname}_cppcheck_test PROPERTIES FAIL_REGULAR_EXPRESSION "${CPPCHECK_FAIL_REGULAR_EXPRESSION}") add_custom_command(TARGET all_cppcheck PRE_BUILD COMMAND ${CPPCHECK_EXECUTABLE} ${CPPCHECK_QUIET_ARG} ${CPPCHECK_TEMPLATE_ARG} ${_cppcheck_args} ${_files} WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" COMMENT "${_targetname}_cppcheck: Running cppcheck on target ${_targetname}..." 
VERBATIM) endif() endfunction() function(add_cppcheck _name) if(NOT TARGET ${_name}) message(FATAL_ERROR "add_cppcheck given a target name that does not exist: '${_name}' !") endif() if(CPPCHECK_FOUND) set(_cppcheck_args) list(FIND ARGN UNUSED_FUNCTIONS _unused_func) if("${_unused_func}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG}) endif() list(FIND ARGN STYLE _style) if("${_style}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG}) endif() list(FIND ARGN POSSIBLE_ERROR _poss_err) if("${_poss_err}" GREATER "-1") list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG}) endif() list(FIND _input FAIL_ON_WARNINGS _fail_on_warn) if("${_fail_on_warn}" GREATER "-1") list(APPEND CPPCHECK_FAIL_REGULAR_EXPRESSION ${CPPCHECK_WARN_REGULAR_EXPRESSION}) list(REMOVE_AT _input ${_unused_func}) endif() get_target_property(_cppcheck_sources "${_name}" SOURCES) set(_files) foreach(_source ${_cppcheck_sources}) get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE) get_source_file_property(_cppcheck_loc "${_source}" LOCATION) if("${_cppcheck_lang}" MATCHES "C") list(APPEND _files "${_cppcheck_loc}") endif() endforeach() if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0") # Older than CMake 2.8.0 add_test(${_name}_cppcheck_test "${CPPCHECK_EXECUTABLE}" ${CPPCHECK_TEMPLATE_ARG} ${_cppcheck_args} ${_files}) else() # CMake 2.8.0 and newer add_test(NAME ${_name}_cppcheck_test COMMAND "${CPPCHECK_EXECUTABLE}" ${CPPCHECK_TEMPLATE_ARG} ${_cppcheck_args} ${_files}) endif() set_tests_properties(${_name}_cppcheck_test PROPERTIES FAIL_REGULAR_EXPRESSION "${CPPCHECK_FAIL_REGULAR_EXPRESSION}") add_custom_command(TARGET all_cppcheck PRE_BUILD COMMAND ${CPPCHECK_EXECUTABLE} ${CPPCHECK_QUIET_ARG} ${CPPCHECK_TEMPLATE_ARG} "--enable=style,information,unusedFunction" ${_cppcheck_args} ${_files} WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" COMMENT "${_name}_cppcheck: Running cppcheck on target ${_name}..." 
VERBATIM) endif() endfunction() mydumper-0.5.2/cmake/modules/FindGLIB2.cmake0000644000000000000000000000132712052470045016642 0ustar 00000000000000# - Try to find the GLIB2 libraries if(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES) # Already in cache, be silent set(GLIB2_FIND_QUIETLY TRUE) endif(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES) if (NOT WIN32) include(FindPkgConfig) pkg_search_module(PC_GLIB2 REQUIRED glib-2.0) pkg_search_module(PC_GTHREAD2 REQUIRED gthread-2.0) endif(NOT WIN32) set(GLIB2_INCLUDE_DIR ${PC_GLIB2_INCLUDE_DIRS}) find_library(GLIB2_LIBRARIES NAMES glib-2.0 HINTS ${PC_GLIB2_LIBDIR} ${PC_GLIB2_LIBRARY_DIRS}) find_library(GTHREAD2_LIBRARIES NAMES gthread-2.0 HINTS ${PC_GTHREAD2_LIBDIR} ${PC_GTHREAD2_LIBRARY_DIRS}) mark_as_advanced(GLIB2_INCLUDE_DIR GLIB2_LIBRARIES GTHREAD2_LIBRARIES) mydumper-0.5.2/cmake/modules/FindMySQL.cmake0000644000000000000000000000711512052470045017011 0ustar 00000000000000# - Find MySQL # Find the MySQL includes and client library # This module defines # MYSQL_INCLUDE_DIR, where to find mysql.h # MYSQL_LIBRARIES, the libraries needed to use MySQL. # MYSQL_FOUND, If false, do not try to use MySQL. # # Copyright (c) 2006, Jaroslaw Staniek, # Lot of adustmens by Michal Cihar # # vim: expandtab sw=4 ts=4 sts=4: # # Redistribution and use is allowed according to the terms of the BSD license. if(UNIX) set(MYSQL_CONFIG_PREFER_PATH "$ENV{MYSQL_HOME}/bin" CACHE FILEPATH "preferred path to MySQL (mysql_config)") find_program(MYSQL_CONFIG mysql_config ${MYSQL_CONFIG_PREFER_PATH} /usr/local/mysql/bin/ /usr/local/bin/ /usr/bin/ ) if(MYSQL_CONFIG) message(STATUS "Using mysql-config: ${MYSQL_CONFIG}") # set CFLAGS exec_program(${MYSQL_CONFIG} ARGS --cflags OUTPUT_VARIABLE MY_TMP) set(MYSQL_CFLAGS ${MY_TMP} CACHE STRING INTERNAL) # set INCLUDE_DIR exec_program(${MYSQL_CONFIG} ARGS --include OUTPUT_VARIABLE MY_TMP) string(REGEX REPLACE "-I([^ ]*)( .*)?" 
"\\1" MY_TMP "${MY_TMP}") set(MYSQL_ADD_INCLUDE_DIR ${MY_TMP} CACHE FILEPATH INTERNAL) # set LIBRARY_DIR exec_program(${MYSQL_CONFIG} ARGS --libs_r OUTPUT_VARIABLE MY_TMP) set(MYSQL_ADD_LIBRARIES "") # prepend space in order to match separate words only (e.g. rather # than "-linux" from within "-L/usr/lib/i386-linux-gnu") string(REGEX MATCHALL " +-l[^ ]*" MYSQL_LIB_LIST " ${MY_TMP}") foreach(MY_LIB ${MYSQL_LIB_LIST}) string(REGEX REPLACE "[ ]*-l([^ ]*)" "\\1" MY_LIB "${MY_LIB}") list(APPEND MYSQL_ADD_LIBRARIES "${MY_LIB}") endforeach(MY_LIB ${MYSQL_LIBS}) set(MYSQL_ADD_LIBRARY_PATH "") string(REGEX MATCHALL " +-L[^ ]*" MYSQL_LIBDIR_LIST " ${MY_TMP}") foreach(MY_LIB ${MYSQL_LIBDIR_LIST}) string(REGEX REPLACE "[ ]*-L([^ ]*)" "\\1" MY_LIB "${MY_LIB}") list(APPEND MYSQL_ADD_LIBRARY_PATH "${MY_LIB}") endforeach(MY_LIB ${MYSQL_LIBS}) else(MYSQL_CONFIG) set(MYSQL_ADD_LIBRARIES "") list(APPEND MYSQL_ADD_LIBRARIES "mysqlclient") endif(MYSQL_CONFIG) else(UNIX) set(MYSQL_ADD_INCLUDE_DIR "c:/msys/local/include" CACHE FILEPATH INTERNAL) set(MYSQL_ADD_LIBRARY_PATH "c:/msys/local/lib" CACHE FILEPATH INTERNAL) ENDIF(UNIX) find_path(MYSQL_INCLUDE_DIR mysql.h ${MYSQL_ADD_INCLUDE_DIR} /usr/local/include /usr/local/include/mysql /usr/local/mysql/include /usr/local/mysql/include/mysql /usr/include /usr/include/mysql ) set(TMP_MYSQL_LIBRARIES "") foreach(MY_LIB ${MYSQL_ADD_LIBRARIES}) find_library("MYSQL_LIBRARIES_${MY_LIB}" NAMES ${MY_LIB} HINTS ${MYSQL_ADD_LIBRARY_PATH} /usr/lib/mysql /usr/lib /usr/local/lib /usr/local/lib/mysql /usr/local/mysql/lib ) list(APPEND TMP_MYSQL_LIBRARIES "${MYSQL_LIBRARIES_${MY_LIB}}") endforeach(MY_LIB ${MYSQL_ADD_LIBRARIES}) set(MYSQL_LIBRARIES ${TMP_MYSQL_LIBRARIES} CACHE FILEPATH INTERNAL) if(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) set(MYSQL_FOUND TRUE CACHE INTERNAL "MySQL found") message(STATUS "Found MySQL: ${MYSQL_INCLUDE_DIR}, ${MYSQL_LIBRARIES}") else(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) set(MYSQL_FOUND FALSE CACHE INTERNAL "MySQL found") 
message(STATUS "MySQL not found.") endif(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES) mark_as_advanced(MYSQL_INCLUDE_DIR MYSQL_LIBRARIES MYSQL_CFLAGS) mydumper-0.5.2/cmake/modules/FindPCRE.cmake0000644000000000000000000000302112052470045016565 0ustar 00000000000000# - Try to find the PCRE regular expression library # Once done this will define # # PCRE_FOUND - system has the PCRE library # PCRE_INCLUDE_DIR - the PCRE include directory # PCRE_LIBRARIES - The libraries needed to use PCRE # Copyright (c) 2006, Alexander Neundorf, # # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. if (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY) # Already in cache, be silent set(PCRE_FIND_QUIETLY TRUE) endif (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY) if (NOT WIN32) # use pkg-config to get the directories and then use these values # in the FIND_PATH() and FIND_LIBRARY() calls find_package(PkgConfig) pkg_check_modules(PC_PCRE REQUIRED libpcre) set(PCRE_DEFINITIONS ${PC_PCRE_CFLAGS_OTHER}) endif (NOT WIN32) find_path(PCRE_INCLUDE_DIR pcre.h HINTS ${PC_PCRE_INCLUDEDIR} ${PC_PCRE_INCLUDE_DIRS} PATH_SUFFIXES pcre) find_library(PCRE_PCRE_LIBRARY NAMES pcre HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS}) find_library(PCRE_PCREPOSIX_LIBRARY NAMES pcreposix HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(PCRE DEFAULT_MSG PCRE_INCLUDE_DIR PCRE_PCRE_LIBRARY PCRE_PCREPOSIX_LIBRARY ) set(PCRE_LIBRARIES ${PCRE_PCRE_LIBRARY} ${PCRE_PCREPOSIX_LIBRARY}) mark_as_advanced(PCRE_INCLUDE_DIR PCRE_LIBRARIES PCRE_PCREPOSIX_LIBRARY PCRE_PCRE_LIBRARY) mydumper-0.5.2/cmake/modules/FindSphinx.cmake0000644000000000000000000000325612052470045017317 0ustar 00000000000000# - This module looks for Sphinx # Find the Sphinx documentation generator # # This modules defines # SPHINX_EXECUTABLE # SPHINX_FOUND # 
SPHINX_MAJOR_VERSION # SPHINX_MINOR_VERSION # SPHINX_VERSION #============================================================================= # Copyright 2002-2009 Kitware, Inc. # Copyright 2009-2011 Peter Colberg # # Distributed under the OSI-approved BSD License (the "License"); # see accompanying file COPYING-CMAKE-SCRIPTS for details. # # This software is distributed WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the License for more information. #============================================================================= # (To distribute this file outside of CMake, substitute the full # License text for the above reference.) find_program(SPHINX_EXECUTABLE NAMES sphinx-build HINTS $ENV{SPHINX_DIR} PATH_SUFFIXES bin DOC "Sphinx documentation generator" ) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(Sphinx DEFAULT_MSG SPHINX_EXECUTABLE ) if (SPHINX_EXECUTABLE) execute_process ( COMMAND "${SPHINX_EXECUTABLE}" -h OUTPUT_VARIABLE _SPHINX_VERSION_OUTPUT ERROR_VARIABLE _SPHINX_VERSION_OUTPUT ) if (_SPHINX_VERSION_OUTPUT MATCHES "Sphinx v([0-9]+\\.[0-9]+\\.[0-9]+)") set (SPHINX_VERSION "${CMAKE_MATCH_1}") string (REPLACE "." 
";" _SPHINX_VERSION_LIST "${SPHINX_VERSION}") list (GET _SPHINX_VERSION_LIST 0 SPHINX_MAJOR_VERSION) list (GET _SPHINX_VERSION_LIST 1 SPHINX_MINOR_VERSION) # patch version meh :) endif() endif() message("${SPHINX_MAJOR_VERSION}") mark_as_advanced( SPHINX_EXECUTABLE ) mydumper-0.5.2/cmake/modules/Findcppcheck.cmake0000644000000000000000000001025612052470045017624 0ustar 00000000000000# - try to find cppcheck tool # # Cache Variables: # CPPCHECK_EXECUTABLE # # Non-cache variables you might use in your CMakeLists.txt: # CPPCHECK_FOUND # CPPCHECK_POSSIBLEERROR_ARG # CPPCHECK_UNUSEDFUNC_ARG # CPPCHECK_STYLE_ARG # CPPCHECK_QUIET_ARG # CPPCHECK_INCLUDEPATH_ARG # CPPCHECK_FAIL_REGULAR_EXPRESSION # CPPCHECK_WARN_REGULAR_EXPRESSION # CPPCHECK_MARK_AS_ADVANCED - whether to mark our vars as advanced even # if we don't find this program. # # Requires these CMake modules: # FindPackageHandleStandardArgs (known included with CMake >=2.6.2) # # Original Author: # 2009-2010 Ryan Pavlik # http://academic.cleardefinition.com # Iowa State University HCI Graduate Program/VRAC # # Copyright Iowa State University 2009-2010. # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) file(TO_CMAKE_PATH "${CPPCHECK_ROOT_DIR}" CPPCHECK_ROOT_DIR) set(CPPCHECK_ROOT_DIR "${CPPCHECK_ROOT_DIR}" CACHE PATH "Path to search for cppcheck") # cppcheck app bundles on Mac OS X are GUI, we want command line only set(_oldappbundlesetting ${CMAKE_FIND_APPBUNDLE}) set(CMAKE_FIND_APPBUNDLE NEVER) # If we have a custom path, look there first. 
if(CPPCHECK_ROOT_DIR) find_program(CPPCHECK_EXECUTABLE NAMES cppcheck cli PATHS "${CPPCHECK_ROOT_DIR}" PATH_SUFFIXES cli NO_DEFAULT_PATH) endif() find_program(CPPCHECK_EXECUTABLE NAMES cppcheck) # Restore original setting for appbundle finding set(CMAKE_FIND_APPBUNDLE ${_oldappbundlesetting}) if(CPPCHECK_EXECUTABLE) # Find out where our test file is get_filename_component(_cppcheckmoddir ${CMAKE_CURRENT_LIST_FILE} PATH) set(_cppcheckdummyfile "${_cppcheckmoddir}/Findcppcheck.cpp") # Check for the two types of command line arguments by just trying them execute_process(COMMAND "${CPPCHECK_EXECUTABLE}" "--enable=style" "--quiet" "${_cppcheckdummyfile}" RESULT_VARIABLE _cppcheck_new_result OUTPUT_QUIET ERROR_QUIET) execute_process(COMMAND "${CPPCHECK_EXECUTABLE}" "--style" "--quiet" "${_cppcheckdummyfile}" RESULT_VARIABLE _cppcheck_old_result OUTPUT_QUIET ERROR_QUIET) if("${_cppcheck_new_result}" EQUAL 0) # New arguments set(CPPCHECK_UNUSEDFUNC_ARG "--enable=unusedFunctions") set(CPPCHECK_POSSIBLEERROR_ARG "--enable=possibleError") set(CPPCHECK_STYLE_ARG "--enable=style") set(CPPCHECK_QUIET_ARG "--quiet") set(CPPCHECK_INCLUDEPATH_ARG "-I") if(MSVC) set(CPPCHECK_TEMPLATE_ARG --template vs) set(CPPCHECK_FAIL_REGULAR_EXPRESSION "[(]error[)]") set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]") elseif(CMAKE_COMPILER_IS_GNUCXX) set(CPPCHECK_TEMPLATE_ARG --template gcc) set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ") set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ") else() message(STATUS "Warning: FindCppcheck doesn't know how to format error messages for your compiler!") set(CPPCHECK_TEMPLATE_ARG --template gcc) set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ") set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ") endif() elseif("${_cppcheck_old_result}" EQUAL 0) # Old arguments set(CPPCHECK_UNUSEDFUNC_ARG "--unused-functions") set(CPPCHECK_POSSIBLEERROR_ARG "--all") set(CPPCHECK_STYLE_ARG "--style") set(CPPCHECK_QUIET_ARG "--quiet") set(CPPCHECK_INCLUDEPATH_ARG "-I") 
set(CPPCHECK_FAIL_REGULAR_EXPRESSION "error:") set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]") else() # No idea - some other issue must be getting in the way message(STATUS "WARNING: Can't detect whether CPPCHECK wants new or old-style arguments!") endif() endif() set(CPPCHECK_ALL "${CPPCHECK_EXECUTABLE} ${CPPCHECK_POSSIBLEERROR_ARG} ${CPPCHECK_UNUSEDFUNC_ARG} ${CPPCHECK_STYLE_ARG} ${CPPCHECK_QUIET_ARG} ${CPPCHECK_INCLUDEPATH_ARG} some/include/path") include(FindPackageHandleStandardArgs) find_package_handle_standard_args(cppcheck DEFAULT_MSG CPPCHECK_ALL CPPCHECK_EXECUTABLE CPPCHECK_POSSIBLEERROR_ARG CPPCHECK_UNUSEDFUNC_ARG CPPCHECK_STYLE_ARG CPPCHECK_INCLUDEPATH_ARG CPPCHECK_QUIET_ARG) if(CPPCHECK_FOUND OR CPPCHECK_MARK_AS_ADVANCED) mark_as_advanced(CPPCHECK_ROOT_DIR) endif() mark_as_advanced(CPPCHECK_EXECUTABLE) mydumper-0.5.2/cmake/modules/Findcppcheck.cpp0000644000000000000000000000041112052470045017316 0ustar 00000000000000/** * \file Findcppcheck.cpp * \brief Dummy C++ source file used by CMake module Findcppcheck.cmake * * \author * Ryan Pavlik, 2009-2010 * * http://academic.cleardefinition.com/ * */ int main(int argc, char* argv[]) { return 0; } mydumper-0.5.2/docs/CMakeLists.txt0000644000000000000000000001047512052470045015204 0ustar 00000000000000# Generate documentation in HTML and PDF format using Sphinx. set(GENERATE_DOC TRUE) # We use the Sphinx documentation generator to render HTML and manual # pages from the user and reference documentation in ReST format. 
find_package(Sphinx QUIET) if(NOT SPHINX_FOUND) message(WARNING "Unable to find Sphinx documentation generator") set(GENERATE_DOC FALSE) endif(NOT SPHINX_FOUND) if(SPHINX_MAJOR_VERSION LESS 1) message(WARNING "Sphinx is older than v1.0, not building docs") set(GENERATE_DOC FALSE) endif(SPHINX_MAJOR_VERSION LESS 1) if(GENERATE_DOC) # documentation tools set(SOURCE_BUILD_DIR "${CMAKE_CURRENT_SOURCE_DIR}/_build") # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") # static ReST documentation sources set(SOURCES_DIR "${CMAKE_CURRENT_BINARY_DIR}/_sources") # generated ReST documentation sources set(REF_SOURCES_DIR "${SOURCES_DIR}/reference") # master document with modules index set(REF_MASTER_DOC "modules") # substitute variables in configuration and scripts foreach(file conf.py sources.cmake ) configure_file( "${SOURCE_BUILD_DIR}/${file}.in" "${BINARY_BUILD_DIR}/${file}" @ONLY ) endforeach(file) set(CLEAN_FILES "${BINARY_BUILD_DIR}/html" ) add_custom_target(ALL DEPENDS "${REF_SOURCES_DIR}/${REF_MASTER_DOC}.rst" ) # Sphinx requires all sources in the same directory tree. As we wish # to include generated reference documention from the build tree, we # copy static ReST documents to the build tree before calling Sphinx. add_custom_target(doc_sources ALL "${CMAKE_COMMAND}" -P "${BINARY_BUILD_DIR}/sources.cmake" ) list(APPEND CLEAN_FILES "${SOURCES_DIR}" ) # note the trailing slash to exclude directory name install(DIRECTORY "${SOURCES_DIR}/" DESTINATION "share/doc/mydumper" ) # Sphinx cache with pickled ReST documents set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") # HTML output directory set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") # This target builds HTML documentation using Sphinx. 
add_custom_target(doc_html ALL ${SPHINX_EXECUTABLE} -q -b html -c "${BINARY_BUILD_DIR}" -d "${SPHINX_CACHE_DIR}" "${SOURCES_DIR}" "${SPHINX_HTML_DIR}" COMMENT "Building HTML documentation with Sphinx" ) list(APPEND CLEAN_FILES "${SPHINX_CACHE_DIR}" "${SPHINX_HTML_DIR}" ) add_dependencies(doc_html doc_sources ) install(DIRECTORY "${SPHINX_HTML_DIR}" DESTINATION "share/doc/mydumper" ) # HTML output directory set(SPHINX_MAN_DIR "${CMAKE_CURRENT_BINARY_DIR}/man") # This target builds a manual page using Sphinx. add_custom_target(doc_man ALL ${SPHINX_EXECUTABLE} -q -b man -c "${BINARY_BUILD_DIR}" -d "${SPHINX_CACHE_DIR}" "${SOURCES_DIR}" "${SPHINX_MAN_DIR}" COMMENT "Building manual page with Sphinx" ) list(APPEND CLEAN_FILES "${SPHINX_MAN_DIR}" ) add_dependencies(doc_man doc_sources ) # serialize Sphinx targets to avoid cache conflicts in parallel builds add_dependencies(doc_man doc_html ) install(FILES "${SPHINX_MAN_DIR}/mydumper.1" "${SPHINX_MAN_DIR}/myloader.1" DESTINATION "share/man/man1" ) # This target builds PDF documentation using Sphinx and LaTeX. if(PDFLATEX_COMPILER) # PDF output directory set(SPHINX_PDF_DIR "${CMAKE_CURRENT_BINARY_DIR}/pdf") add_custom_target(doc_pdf ALL ${SPHINX_EXECUTABLE} -q -b latex -c "${BINARY_BUILD_DIR}" -d "${SPHINX_CACHE_DIR}" "${SOURCES_DIR}" "${SPHINX_PDF_DIR}" COMMENT "Building PDF documentation with Sphinx" ) add_custom_command(TARGET doc_pdf POST_BUILD COMMAND ${CMAKE_MAKE_PROGRAM} LATEXOPTS=-interaction=batchmode WORKING_DIRECTORY "${SPHINX_PDF_DIR}" ) list(APPEND CLEAN_FILES "${SPHINX_PDF_DIR}" ) add_dependencies(doc_pdf doc_sources ) # serialize Sphinx targets to avoid cache conflicts in parallel builds add_dependencies(doc_pdf doc_man ) install(FILES "${SPHINX_PDF_DIR}/mydumper.pdf" DESTINATION "share/doc/mydumper" ) endif(PDFLATEX_COMPILER) # Add output directories to clean target. 
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${CLEAN_FILES}" ) endif(GENERATE_DOC) mydumper-0.5.2/docs/_build/0000755000000000000000000000000012052470045013673 5ustar 00000000000000mydumper-0.5.2/docs/_static/0000755000000000000000000000000012052470045014063 5ustar 00000000000000mydumper-0.5.2/docs/authors.rst0000644000000000000000000000053512052470045014657 0ustar 00000000000000Authors ======= The code for mydumper has been written by the following people: * `Domas Mituzas `_, Facebook ( domas at fb dot com ) * `Andrew Hutchings `_, SkySQL ( andrew at skysql dot com ) * `Mark Leith `_, Oracle Corporation ( mark dot leith at oracle dot com ) mydumper-0.5.2/docs/compiling.rst0000644000000000000000000000270112052470045015150 0ustar 00000000000000Compiling ========= Requirements ------------ mydumper requires the following before it can be compiled: * `CMake `_ * `Glib2 `_ (with development packages) * `PCRE `_ (with development packages) * `MySQL `_ client libraries (with development packages) Additionally the following packages are optional: * `python-sphinx `_ (for documentation) Ubuntu/Debian ^^^^^^^^^^^^^ .. code-block:: bash apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev Fedora/Redhat/CentOS ^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash yum install glib2-devel mysql-devel zlib-devel pcre-devel OpenSUSE ^^^^^^^^ .. code-block:: bash zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel Mac OSX ^^^^^^^ .. code-block:: bash port install glib2 mysql5 pcre CMake ----- CMake is used for mydumper's build system and is executed as follows:: cmake . 
make You can optionally provide parameters for CMake, the possible options are: * ``-DMYSQL_CONFIG=/path/to/mysql_config`` - The path and filename for the mysql_config executable * ``-DCMAKE_INSTALL_PREFIX=/install/path`` - The path where mydumper should be installed Documentation ------------- If you wish to just compile the documentation you can do so with:: cmake . make doc_html or for a man page output:: cmake . make doc_man mydumper-0.5.2/docs/examples.rst0000644000000000000000000000262012052470045015005 0ustar 00000000000000Examples ======== Simple Usage ------------ Just running :program:`mydumper` without any options will try to connect to a server using the default socket path. It will then dump the tables from all databases using 4 worker threads. Regex ----- To use :program:`mydumper`'s regex feature simply use the :option:`--regex ` option. In the following example mydumper will ignore the ``test`` and ``mysql`` databases:: mydumper --regex '^(?!(mysql|test))' Restoring a dump ---------------- Mydumper now include myloader which is a multi-threaded restoration tool. To use myloader with a mydumper dump you simply need to pass it the directory of the dump along with a user capable of restoring the schemas and data. As an example the following will restore a dump overwriting any existing tables:: myloader --directory=export-20110614-094953 --overwrite-tables --user=root Daemon mode ----------- Mydumper has a daemon mode which will snapshot the dump data every so often whilst continuously retreiving the binary log files. This gives a continuous consistent backup right up to the point where the database server fails. To use this you simply need to use the :option:`--daemon ` option. 
In the following example mydumper will use daemon mode, creating a snapshot every half an hour and log to an output file:: mydumper --daemon --snapshot-interval=30 --logfile=dump.log mydumper-0.5.2/docs/files.rst0000644000000000000000000000403612052470045014274 0ustar 00000000000000Output Files ============ mydumper generates several files during the generation of the dump. Many of these are for the table data itself since every table has at least one file. Metadata -------- When a dump is executed a file called ``.metadata`` is created in the output directory. This contains the start and end time of the dump as well as the master binary log positions if applicable. This is an example of the content of this file:: Started dump at: 2011-05-05 13:57:17 SHOW MASTER STATUS: Log: linuxjedi-laptop-bin.000001 Pos: 106 Finished dump at: 2011-05-05 13:57:17 Table Data ---------- The data from every table is written into a separate file, also if the :option:`--rows ` option is used then each chunk of table will be in a separate file. The file names for this are in the format:: database.table.sql(.gz) or if chunked:: database.table.chunk.sql(.gz) Where 'chunk' is a number padded with up to 5 zeros. Table Schemas ------------- When the :option:`--schemas ` option is used mydumper will create a file for the schema of every table it is writing data for. The files for this are in the following format:: database.table-schema.sql(.gz) Binary Logs ----------- Binary logs are retrieved when :option:`--binlogs ` option has been set. This will store them in the ``binlog_snapshot/`` sub-directory inside the dump directory. The binary log files have the same filename as the MySQL server that supplies them and will also have a .gz on the end if they are compressed. Daemon mode ----------- Daemon mode does things a little differently. There are the directories ``0`` and ``1`` inside the dump directory. 
These alternate when dumping so that if mydumper fails for any reason there is still a good snapshot. When a snapshot dump is complete the ``last_dump`` symlink is updated to point to that dump. If binary logging is enabled mydumper will connect as if it is a slave server and constantly retreives the binary logs into the ``binlogs`` subdirectory. mydumper-0.5.2/docs/index.rst0000644000000000000000000000071012052470045014274 0ustar 00000000000000.. MySQL Data Dumper documentation master file You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to MySQL Data Dumper's documentation! ============================================= Contents: .. toctree:: :maxdepth: 2 authors compiling mydumper_usage myloader_usage files examples Indices and tables ================== * :ref:`genindex` * :ref:`search` mydumper-0.5.2/docs/mydumper_usage.rst0000644000000000000000000000676612052470045016234 0ustar 00000000000000Mydumper Usage ============== Synopsis -------- :program:`mydumper` [:ref:`OPTIONS `] Description ----------- :program:`mydumper` is a tool used for backing up MySQL database servers much faster than the mysqldump tool distributed with MySQL. It also has the capability to retrieve the binary logs from the remote server at the same time as the dump itself. The advantages of mydumper are: * Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall) * Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data) * Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc * Manageability - supports PCRE for specifying database and tables inclusions and exclusions .. _mydumper-options-label: Options ------- The :program:`mydumper` tool has several available options: .. program:: mydumper .. option:: --help, -? Show help text .. 
option:: --host, -h Hostname of MySQL server to connect to (default localhost) .. option:: --user, -u MySQL username with the correct privileges to execute the dump .. option:: --password, -p The corresponding password for the MySQL user .. option:: --port, -P The port for the MySQL connection. .. note:: For localhost TCP connections use 127.0.0.1 for :option:`--host`. .. option:: --socket, -S The UNIX domain socket file to use for the connection .. option:: --database, -B Database to dump .. option:: --table-list, -T A comma separated list of tables to dump .. option:: --threads, -t The number of threads to use for dumping data, default is 4 .. note:: Other threads are used in mydumper, this option does not control these .. option:: --outputdir, -o Output directory name, default is export-YYYYMMDD-HHMMSS .. option:: --statement-size, -s The maximum size for an insert statement before breaking into a new statement, default 1,000,000 bytes .. option:: --rows, -r Split table into chunks of this many rows, default unlimited .. option:: --compress, -c Compress the output files .. option:: --compress-input, -C Use client protocol compression for connections to the MySQL server .. option:: --build-empty-files, -e Create empty dump files if there is no data to dump .. option:: --regex, -x A regular expression to match against database and table .. option:: --ignore-engines, -i Comma separated list of storage engines to ignore .. option:: --no-schemas, -m Do not dump schemas with the data .. option:: --long-query-guard, -l Timeout for long query execution in seconds, default 60 .. option:: --kill-long-queries, -k Kill long running queries instead of aborting the dump .. option:: --version, -V Show the program version and exit .. option:: --verbose, -v The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info. Default is 2. .. option:: --binlogs, -b Get the binlogs from the server as well as the dump files .. option:: --daemon, -D Enable daemon mode .. 
option:: --snapshot-interval, -I Interval between each dump snapshot (in minutes), requires :option:`--daemon`, default 60 (minutes) .. option:: --logfile, -L A file to log mydumper output to instead of console output. Useful for daemon mode. .. option:: --no-locks, -k Do not execute the temporary shared read lock. .. warning:: This will cause inconsistent backups. mydumper-0.5.2/docs/myloader_usage.rst0000644000000000000000000000413212052470045016167 0ustar 00000000000000Myloader Usage ============== Synopsis -------- :program:`myloader` :option:`--directory ` = /path/to/mydumper/backup [:ref:`OPTIONS `] Description ----------- :program:`myloader` is a tool used for multi-threaded restoration of mydumper backups. .. _myloader-options-label: Options ------- The :program:`myloader` tool has several available options: .. program:: myloader .. option:: --help, -? Show help text .. option:: --host, -h Hostname of MySQL server to connect to (default localhost) .. option:: --user, -u MySQL username with the correct privileges to execute the restoration .. option:: --password, -p The corresponding password for the MySQL user .. option:: --port, -P The port for the MySQL connection. .. note:: For localhost TCP connections use 127.0.0.1 for :option:`--host`. .. option:: --socket, -S The UNIX domain socket file to use for the connection .. option:: --threads, -t The number of threads to use for restoring data, default is 4 .. option:: --version, -V Show the program version and exit .. option:: --compress-protocol, -C Use client protocol compression for connections to the MySQL server .. option:: --directory, -d The directory of the mydumper backup to restore .. option:: --database, -B An alternative database to load the dump into .. note:: For use with single database dumps. When using with multi-database dumps that have duplicate table names in more than one database it may cause errors. 
Alternatively this scenario may give unpredictable results with :option:`--overwrite-tables`. .. option:: --queries-per-transaction, -q Number of INSERT queries to execute per transaction during restore, default is 1000. .. option:: --overwrite-tables, -o Drop any existing tables when restoring schemas .. option:: --enable-binlog, -e Log the data loading in the MySQL binary log if enabled (off by default) .. option:: --verbose, -v The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info. Default is 2. mydumper-0.5.2/docs/_build/conf.py.in0000644000000000000000000001603512052470045015604 0ustar 00000000000000# -*- coding: utf-8 -*- # # MySQL Data Dumper documentation build configuration file, created by # sphinx-quickstart on Tue Apr 26 11:44:25 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'@PROJECT_NAME@' copyright = u'2011, Andrew Hutchings' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '@VERSION@' # The full version, including alpha/beta/rc tags. release = '@VERSION@' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'MySQLDataDumperdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'MySQLDataDumper.tex', u'@PROJECT_NAME@ Documentation', u'Andrew Hutchings', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('mydumper_usage', 'mydumper', u'@PROGRAM_DESC@', [u'Andrew Hutchings'], 1), ('myloader_usage', 'myloader', u'@PROGRAM_DESC@', [u'Andrew Hutchings'], 1) ] mydumper-0.5.2/docs/_build/sources.cmake.in0000644000000000000000000000104712052470045016767 0ustar 00000000000000# This script recursively copies all ReST documents from the source directory to # the binary directory. CMAKE_CURRENT_SOURCE_DIR and SOURCES_DIR are substituted # upon the cmake stage. The script is executed upon the make stage to ensure # that the binary sources directory is always up to date. file(GLOB SOURCES RELATIVE "@CMAKE_CURRENT_SOURCE_DIR@" "@CMAKE_CURRENT_SOURCE_DIR@/*.rst" ) foreach(source ${SOURCES}) configure_file( "@CMAKE_CURRENT_SOURCE_DIR@/${source}" "@SOURCES_DIR@/${source}" COPYONLY ) endforeach(source)